FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_reserv.c

/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *      Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.0/sys/vm/vm_reserv.c 292469 2015-12-19 18:42:50Z alc $");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>

/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully-automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */

#if VM_NRESERVLEVEL > 0

/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define VM_LEVEL_0_NPAGES       (1 << VM_LEVEL_0_ORDER)

/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define VM_LEVEL_0_SHIFT        (VM_LEVEL_0_ORDER + PAGE_SHIFT)

/*
 * The size of a level 0 reservation in bytes
 */
#define VM_LEVEL_0_SIZE         (1 << VM_LEVEL_0_SHIFT)

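/*
 * A worked example of the constants above (assuming amd64, where PAGE_SHIFT
 * is 12 and VM_LEVEL_0_ORDER is 9): VM_LEVEL_0_NPAGES is 512,
 * VM_LEVEL_0_SHIFT is 21, and VM_LEVEL_0_SIZE is 2MB, matching the
 * hardware's 2MB superpage size.
 */
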
/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define VM_RESERV_INDEX(object, pindex) \
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))

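/*
 * A worked example of the index computation (assuming the amd64 values
 * above): for an object with pg_color 3, pindex 5 maps to index
 * (3 + 5) & 511 = 8.  The color biases the index so that the object's pages
 * occupy the same offsets within a reservation as they would within an
 * aligned physical superpage.
 */
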
/*
 * The size of a population map entry
 */
typedef u_long          popmap_t;

/*
 * The number of bits in a population map entry
 */
#define NBPOPMAP        (NBBY * sizeof(popmap_t))

/*
 * The number of population map entries in a reservation
 */
#define NPOPMAP         howmany(VM_LEVEL_0_NPAGES, NBPOPMAP)

/*
 * Clear a bit in the population map.
 */
static __inline void
popmap_clear(popmap_t popmap[], int i)
{

        popmap[i / NBPOPMAP] &= ~(1UL << (i % NBPOPMAP));
}

/*
 * Set a bit in the population map.
 */
static __inline void
popmap_set(popmap_t popmap[], int i)
{

        popmap[i / NBPOPMAP] |= 1UL << (i % NBPOPMAP);
}

/*
 * Is a bit in the population map clear?
 */
static __inline boolean_t
popmap_is_clear(popmap_t popmap[], int i)
{

        return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) == 0);
}

/*
 * Is a bit in the population map set?
 */
static __inline boolean_t
popmap_is_set(popmap_t popmap[], int i)
{

        return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) != 0);
}

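/*
 * A brief sketch of the indexing shared by the helpers above (assuming
 * 64-bit popmap entries, so NBPOPMAP is 64): bit "i" lives at bit
 * (i % 64) of word (i / 64).  For instance, page 130 of a reservation is
 * tracked by bit 2 of popmap[2]:
 *
 *      popmap_set(rv->popmap, 130);            // popmap[2] |= 1UL << 2
 *      if (popmap_is_set(rv->popmap, 130))     // tests popmap[2] bit 2
 *              popmap_clear(rv->popmap, 130);  // popmap[2] &= ~(1UL << 2)
 */
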
/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially-
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially-populated reservation can be broken and reclaimed at any time.
 */
struct vm_reserv {
        TAILQ_ENTRY(vm_reserv) partpopq;
        LIST_ENTRY(vm_reserv) objq;
        vm_object_t     object;                 /* containing object */
        vm_pindex_t     pindex;                 /* offset within object */
        vm_page_t       pages;                  /* first page of a superpage */
        int             popcnt;                 /* # of pages in use */
        char            inpartpopq;
        popmap_t        popmap[NPOPMAP];        /* bit vector of used pages */
};

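/*
 * With the amd64 values assumed above, NPOPMAP is howmany(512, 64) = 8, so
 * the population map contributes 64 bytes to each reservation structure;
 * one structure exists for every 2MB of physical address space covered by
 * the reservation array below.
 */
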
/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in that
 * it may contain more reservation structures than there are (physical)
 * superpages.  These extra, "invalid" reservation structures exist to trade
 * off space for time in the implementation of vm_reserv_from_page().
 * Invalid reservation structures are distinguishable from "valid"
 * reservation structures by inspecting the reservation's "pages" field.
 * Invalid reservation structures have a NULL "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;

/*
 * The partially-populated reservation queue
 *
 * This queue enables the fast recovery of an unused cached or free small page
 * from a partially-populated reservation.  The reservation at the head of
 * this queue is the least-recently-changed, partially-populated reservation.
 *
 * Access to this queue is synchronized by the free page queue lock.
 */
static TAILQ_HEAD(, vm_reserv) vm_rvq_partpop =
                            TAILQ_HEAD_INITIALIZER(vm_rvq_partpop);

static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info");

static long vm_reserv_broken;
SYSCTL_LONG(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, 0, "Cumulative number of broken reservations");

static long vm_reserv_freed;
SYSCTL_LONG(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, 0, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_fullpop, "I", "Current number of full reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_partpopq, "A", "Partially-populated reservation queues");

static long vm_reserv_reclaimed;
SYSCTL_LONG(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, 0, "Cumulative number of reclaimed reservations");

static void             vm_reserv_break(vm_reserv_t rv, vm_page_t m);
static void             vm_reserv_depopulate(vm_reserv_t rv, int index);
static vm_reserv_t      vm_reserv_from_page(vm_page_t m);
static boolean_t        vm_reserv_has_pindex(vm_reserv_t rv,
                            vm_pindex_t pindex);
static void             vm_reserv_populate(vm_reserv_t rv, int index);
static void             vm_reserv_reclaim(vm_reserv_t rv);

/*
 * Returns the current number of full reservations.
 *
 * Since the number of full reservations is computed without acquiring the
 * free page queue lock, the returned value may be inexact.
 */
static int
sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS)
{
        vm_paddr_t paddr;
        struct vm_phys_seg *seg;
        vm_reserv_t rv;
        int fullpop, segind;

        fullpop = 0;
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
                while (paddr + VM_LEVEL_0_SIZE <= seg->end) {
                        rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
                        fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
                        paddr += VM_LEVEL_0_SIZE;
                }
        }
        return (sysctl_handle_int(oidp, &fullpop, 0, req));
}

/*
 * Describes the current state of the partially-populated reservation queue.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        vm_reserv_t rv;
        int counter, error, level, unused_pages;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
        sbuf_printf(&sbuf, "\nLEVEL     SIZE  NUMBER\n\n");
        for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
                counter = 0;
                unused_pages = 0;
                mtx_lock(&vm_page_queue_free_mtx);
                TAILQ_FOREACH(rv, &vm_rvq_partpop/*[level]*/, partpopq) {
                        counter++;
                        unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
                }
                mtx_unlock(&vm_page_queue_free_mtx);
                sbuf_printf(&sbuf, "%5d: %6dK, %6d\n", level,
                    unused_pages * ((int)PAGE_SIZE / 1024), counter);
        }
        error = sbuf_finish(&sbuf);
        sbuf_delete(&sbuf);
        return (error);
}

/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially-populated reservation queue if the
 * population count is non-zero.
 *
 * The free page queue lock must be held.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv, int index)
{

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        KASSERT(rv->object != NULL,
            ("vm_reserv_depopulate: reserv %p is free", rv));
        KASSERT(popmap_is_set(rv->popmap, index),
            ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
            index));
        KASSERT(rv->popcnt > 0,
            ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
        if (rv->inpartpopq) {
                TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
                rv->inpartpopq = FALSE;
        } else {
                KASSERT(rv->pages->psind == 1,
                    ("vm_reserv_depopulate: reserv %p is already demoted",
                    rv));
                rv->pages->psind = 0;
        }
        popmap_clear(rv->popmap, index);
        rv->popcnt--;
        if (rv->popcnt == 0) {
                LIST_REMOVE(rv, objq);
                rv->object = NULL;
                vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
                vm_reserv_freed++;
        } else {
                rv->inpartpopq = TRUE;
                TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
        }
}

/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{

        return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
}

/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

        return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}

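/*
 * The test above is an unsigned range check in disguise: the difference
 * "pindex - rv->pindex" has no bits set outside VM_LEVEL_0_NPAGES - 1 only
 * when 0 <= pindex - rv->pindex < VM_LEVEL_0_NPAGES.  For example, with
 * 512-page reservations and rv->pindex == 1024, pindex 1300 yields 276
 * (TRUE), pindex 1600 yields 576 (FALSE), and pindex 1000 wraps around to
 * a huge unsigned value (FALSE).
 */
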
/*
 * Increases the given reservation's population count.  Moves the reservation
 * to the tail of the partially-populated reservation queue.
 *
 * The free page queue must be locked.
 */
static void
vm_reserv_populate(vm_reserv_t rv, int index)
{

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        KASSERT(rv->object != NULL,
            ("vm_reserv_populate: reserv %p is free", rv));
        KASSERT(popmap_is_clear(rv->popmap, index),
            ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
            index));
        KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
            ("vm_reserv_populate: reserv %p is already full", rv));
        KASSERT(rv->pages->psind == 0,
            ("vm_reserv_populate: reserv %p is already promoted", rv));
        if (rv->inpartpopq) {
                TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
                rv->inpartpopq = FALSE;
        }
        popmap_set(rv->popmap, index);
        rv->popcnt++;
        if (rv->popcnt < VM_LEVEL_0_NPAGES) {
                rv->inpartpopq = TRUE;
                TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
        } else
                rv->pages->psind = 1;
}

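/*
 * Setting "psind" to 1 on the reservation's first page advertises a fully
 * populated reservation to the rest of the VM system, so the pmap layer
 * can map the entire range with a single superpage mapping.  Conversely,
 * vm_reserv_depopulate() above resets "psind" to 0 as soon as any page of
 * a fully populated reservation is freed.
 */
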
/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from existing or newly created reservations.  All of the physical pages
 * must be at or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross any
 * physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
        vm_paddr_t pa, size;
        vm_page_t m, m_ret, mpred, msucc;
        vm_pindex_t first, leftcap, rightcap;
        vm_reserv_t rv;
        u_long allocpages, maxpages, minpages;
        int i, index, n;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        VM_OBJECT_ASSERT_WLOCKED(object);
        KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

        /*
         * Is a reservation fundamentally impossible?
         */
        if (pindex < VM_RESERV_INDEX(object, pindex) ||
            pindex + npages > object->size)
                return (NULL);

        /*
         * All reservations of a particular size have the same alignment.
         * Assuming that the first page is allocated from a reservation, the
         * least significant bits of its physical address can be determined
         * from its offset from the beginning of the reservation and the size
         * of the reservation.
         *
         * Could the specified index within a reservation of the smallest
         * possible size satisfy the alignment and boundary requirements?
         */
        pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
        if ((pa & (alignment - 1)) != 0)
                return (NULL);
        size = npages << PAGE_SHIFT;
        if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
                return (NULL);

        /*
         * Look for an existing reservation.
         */
        mpred = vm_radix_lookup_le(&object->rtree, pindex);
        if (mpred != NULL) {
                KASSERT(mpred->pindex < pindex,
                    ("vm_reserv_alloc_contig: pindex already allocated"));
                rv = vm_reserv_from_page(mpred);
                if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
                        goto found;
                msucc = TAILQ_NEXT(mpred, listq);
        } else
                msucc = TAILQ_FIRST(&object->memq);
        if (msucc != NULL) {
                KASSERT(msucc->pindex > pindex,
                    ("vm_reserv_alloc_contig: pindex already allocated"));
                rv = vm_reserv_from_page(msucc);
                if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
                        goto found;
        }

        /*
         * Could at least one reservation fit between the first index to the
         * left that can be used ("leftcap") and the first index to the right
         * that cannot be used ("rightcap")?
         */
        first = pindex - VM_RESERV_INDEX(object, pindex);
        if (mpred != NULL) {
                if ((rv = vm_reserv_from_page(mpred))->object != object)
                        leftcap = mpred->pindex + 1;
                else
                        leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
                if (leftcap > first)
                        return (NULL);
        }
        minpages = VM_RESERV_INDEX(object, pindex) + npages;
        maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
        allocpages = maxpages;
        if (msucc != NULL) {
                if ((rv = vm_reserv_from_page(msucc))->object != object)
                        rightcap = msucc->pindex;
                else
                        rightcap = rv->pindex;
                if (first + maxpages > rightcap) {
                        if (maxpages == VM_LEVEL_0_NPAGES)
                                return (NULL);

                        /*
                         * At least one reservation will fit between "leftcap"
                         * and "rightcap".  However, a reservation for the
                         * last of the requested pages will not fit.  Reduce
                         * the size of the upcoming allocation accordingly.
                         */
                        allocpages = minpages;
                }
        }

        /*
         * Would the last new reservation extend past the end of the object?
         */
        if (first + maxpages > object->size) {
                /*
                 * Don't allocate the last new reservation if the object is a
                 * vnode or backed by another object that is a vnode.
                 */
                if (object->type == OBJT_VNODE ||
                    (object->backing_object != NULL &&
                    object->backing_object->type == OBJT_VNODE)) {
                        if (maxpages == VM_LEVEL_0_NPAGES)
                                return (NULL);
                        allocpages = minpages;
                }
                /* Speculate that the object may grow. */
        }

        /*
         * Allocate the physical pages.  The alignment and boundary specified
         * for this allocation may be different from the alignment and
         * boundary specified for the requested pages.  For instance, the
         * specified index may not be the first page within the first new
         * reservation.
         */
        m = vm_phys_alloc_contig(allocpages, low, high, ulmax(alignment,
            VM_LEVEL_0_SIZE), boundary > VM_LEVEL_0_SIZE ? boundary : 0);
        if (m == NULL)
                return (NULL);

        /*
         * The allocated physical pages always begin at a reservation
         * boundary, but they do not always end at a reservation boundary.
         * Initialize every reservation that is completely covered by the
         * allocated physical pages.
         */
        m_ret = NULL;
        index = VM_RESERV_INDEX(object, pindex);
        do {
                rv = vm_reserv_from_page(m);
                KASSERT(rv->pages == m,
                    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
                    rv));
                KASSERT(rv->object == NULL,
                    ("vm_reserv_alloc_contig: reserv %p isn't free", rv));
                LIST_INSERT_HEAD(&object->rvq, rv, objq);
                rv->object = object;
                rv->pindex = first;
                KASSERT(rv->popcnt == 0,
                    ("vm_reserv_alloc_contig: reserv %p's popcnt is corrupted",
                    rv));
                KASSERT(!rv->inpartpopq,
                    ("vm_reserv_alloc_contig: reserv %p's inpartpopq is TRUE",
                    rv));
                for (i = 0; i < NPOPMAP; i++)
                        KASSERT(rv->popmap[i] == 0,
                    ("vm_reserv_alloc_contig: reserv %p's popmap is corrupted",
                            rv));
                n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
                for (i = 0; i < n; i++)
                        vm_reserv_populate(rv, index + i);
                npages -= n;
                if (m_ret == NULL) {
                        m_ret = &rv->pages[index];
                        index = 0;
                }
                m += VM_LEVEL_0_NPAGES;
                first += VM_LEVEL_0_NPAGES;
                allocpages -= VM_LEVEL_0_NPAGES;
        } while (allocpages >= VM_LEVEL_0_NPAGES);
        return (m_ret);

        /*
         * Found a matching reservation.
         */
found:
        index = VM_RESERV_INDEX(object, pindex);
        /* Does the allocation fit within the reservation? */
        if (index + npages > VM_LEVEL_0_NPAGES)
                return (NULL);
        m = &rv->pages[index];
        pa = VM_PAGE_TO_PHYS(m);
        if (pa < low || pa + size > high || (pa & (alignment - 1)) != 0 ||
            ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
                return (NULL);
        /* Handle vm_page_rename(m, new_object, ...). */
        for (i = 0; i < npages; i++)
                if (popmap_is_set(rv->popmap, index + i))
                        return (NULL);
        for (i = 0; i < npages; i++)
                vm_reserv_populate(rv, index + i);
        return (m);
}

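/*
 * A worked example of the sizing logic above (assuming 512-page
 * reservations): a request for npages = 600 at VM_RESERV_INDEX() == 100
 * gives minpages = 700 and maxpages = 1024.  If a neighboring reservation
 * ("rightcap") leaves room for only 700 pages, allocpages is trimmed to
 * 700; the first 512 pages then form one reservation, of which pages
 * 100..511 are populated, and the trailing 188 pages are returned to the
 * caller directly, without a containing reservation.
 */
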
/*
 * Allocates a page from an existing or newly-created reservation.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, vm_page_t mpred)
{
        vm_page_t m, msucc;
        vm_pindex_t first, leftcap, rightcap;
        vm_reserv_t rv;
        int i, index;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        VM_OBJECT_ASSERT_WLOCKED(object);

        /*
         * Is a reservation fundamentally impossible?
         */
        if (pindex < VM_RESERV_INDEX(object, pindex) ||
            pindex >= object->size)
                return (NULL);

        /*
         * Look for an existing reservation.
         */
        if (mpred != NULL) {
                KASSERT(mpred->object == object,
                    ("vm_reserv_alloc_page: object doesn't contain mpred"));
                KASSERT(mpred->pindex < pindex,
                    ("vm_reserv_alloc_page: mpred doesn't precede pindex"));
                rv = vm_reserv_from_page(mpred);
                if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
                        goto found;
                msucc = TAILQ_NEXT(mpred, listq);
        } else
                msucc = TAILQ_FIRST(&object->memq);
        if (msucc != NULL) {
                KASSERT(msucc->pindex > pindex,
                    ("vm_reserv_alloc_page: msucc doesn't succeed pindex"));
                rv = vm_reserv_from_page(msucc);
                if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
                        goto found;
        }

        /*
         * Could a reservation fit between the first index to the left that
         * can be used and the first index to the right that cannot be used?
         */
        first = pindex - VM_RESERV_INDEX(object, pindex);
        if (mpred != NULL) {
                if ((rv = vm_reserv_from_page(mpred))->object != object)
                        leftcap = mpred->pindex + 1;
                else
                        leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
                if (leftcap > first)
                        return (NULL);
        }
        if (msucc != NULL) {
                if ((rv = vm_reserv_from_page(msucc))->object != object)
                        rightcap = msucc->pindex;
                else
                        rightcap = rv->pindex;
                if (first + VM_LEVEL_0_NPAGES > rightcap)
                        return (NULL);
        }

        /*
         * Would a new reservation extend past the end of the object?
         */
        if (first + VM_LEVEL_0_NPAGES > object->size) {
                /*
                 * Don't allocate a new reservation if the object is a vnode or
                 * backed by another object that is a vnode.
                 */
                if (object->type == OBJT_VNODE ||
                    (object->backing_object != NULL &&
                    object->backing_object->type == OBJT_VNODE))
                        return (NULL);
                /* Speculate that the object may grow. */
        }

        /*
         * Allocate and populate the new reservation.
         */
        m = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, VM_LEVEL_0_ORDER);
        if (m == NULL)
                return (NULL);
        rv = vm_reserv_from_page(m);
        KASSERT(rv->pages == m,
            ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
        KASSERT(rv->object == NULL,
            ("vm_reserv_alloc_page: reserv %p isn't free", rv));
        LIST_INSERT_HEAD(&object->rvq, rv, objq);
        rv->object = object;
        rv->pindex = first;
        KASSERT(rv->popcnt == 0,
            ("vm_reserv_alloc_page: reserv %p's popcnt is corrupted", rv));
        KASSERT(!rv->inpartpopq,
            ("vm_reserv_alloc_page: reserv %p's inpartpopq is TRUE", rv));
        for (i = 0; i < NPOPMAP; i++)
                KASSERT(rv->popmap[i] == 0,
                    ("vm_reserv_alloc_page: reserv %p's popmap is corrupted",
                    rv));
        index = VM_RESERV_INDEX(object, pindex);
        vm_reserv_populate(rv, index);
        return (&rv->pages[index]);

        /*
         * Found a matching reservation.
         */
found:
        index = VM_RESERV_INDEX(object, pindex);
        m = &rv->pages[index];
        /* Handle vm_page_rename(m, new_object, ...). */
        if (popmap_is_set(rv->popmap, index))
                return (NULL);
        vm_reserv_populate(rv, index);
        return (m);
}

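/*
 * A simplified sketch of how the page allocator is expected to call the
 * function above (the exact logic lives in vm_page_alloc() and varies by
 * release):
 *
 *      #if VM_NRESERVLEVEL > 0
 *              m = vm_reserv_alloc_page(object, pindex, mpred);
 *      #endif
 *              if (m == NULL)
 *                      ... fall back to an ordinary single-page allocation ...
 *
 * A NULL return here is therefore not an error; it merely tells the caller
 * that no reservation could serve the request.
 */
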
/*
 * Breaks the given reservation.  Except for the specified cached or free
 * page, all cached and free pages in the reservation are returned to the
 * physical memory allocator.  The reservation's population count and map are
 * reset to their initial state.
 *
 * The given reservation must not be in the partially-populated reservation
 * queue.  The free page queue lock must be held.
 */
static void
vm_reserv_break(vm_reserv_t rv, vm_page_t m)
{
        int begin_zeroes, hi, i, lo;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        KASSERT(rv->object != NULL,
            ("vm_reserv_break: reserv %p is free", rv));
        KASSERT(!rv->inpartpopq,
            ("vm_reserv_break: reserv %p's inpartpopq is TRUE", rv));
        LIST_REMOVE(rv, objq);
        rv->object = NULL;
        if (m != NULL) {
                /*
                 * Since the reservation is being broken, there is no harm in
                 * abusing the population map to stop "m" from being returned
                 * to the physical memory allocator.
                 */
                i = m - rv->pages;
                KASSERT(popmap_is_clear(rv->popmap, i),
                    ("vm_reserv_break: reserv %p's popmap is corrupted", rv));
                popmap_set(rv->popmap, i);
                rv->popcnt++;
        }
        i = hi = 0;
        do {
                /* Find the next 0 bit.  Any previous 0 bits are < "hi". */
                lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
                if (lo == 0) {
                        /* Redundantly clears bits < "hi". */
                        rv->popmap[i] = 0;
                        rv->popcnt -= NBPOPMAP - hi;
                        while (++i < NPOPMAP) {
                                lo = ffsl(~rv->popmap[i]);
                                if (lo == 0) {
                                        rv->popmap[i] = 0;
                                        rv->popcnt -= NBPOPMAP;
                                } else
                                        break;
                        }
                        if (i == NPOPMAP)
                                break;
                        hi = 0;
                }
                KASSERT(lo > 0, ("vm_reserv_break: lo is %d", lo));
                /* Convert from ffsl() to ordinary bit numbering. */
                lo--;
                if (lo > 0) {
                        /* Redundantly clears bits < "hi". */
                        rv->popmap[i] &= ~((1UL << lo) - 1);
                        rv->popcnt -= lo - hi;
                }
                begin_zeroes = NBPOPMAP * i + lo;
                /* Find the next 1 bit. */
                do
                        hi = ffsl(rv->popmap[i]);
                while (hi == 0 && ++i < NPOPMAP);
                if (i != NPOPMAP)
                        /* Convert from ffsl() to ordinary bit numbering. */
                        hi--;
                vm_phys_free_contig(&rv->pages[begin_zeroes], NBPOPMAP * i +
                    hi - begin_zeroes);
        } while (i < NPOPMAP);
        KASSERT(rv->popcnt == 0,
            ("vm_reserv_break: reserv %p's popcnt is corrupted", rv));
        vm_reserv_broken++;
}

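/*
 * An illustration of the scan above (assuming 64-bit popmap entries): with
 * popmap[0] ending in ...0011100011, the first iteration finds the zero
 * run at bits 2..4 and passes &rv->pages[2] and a length of 3 to
 * vm_phys_free_contig(); the next iteration's search begins above bit 7
 * and finds the run starting at bit 8.  Set bits are skipped, and popcnt
 * is debited for them, because those pages remain allocated to their
 * owners even though the reservation is dissolving; popcnt thus reaches 0
 * exactly when the whole map has been consumed.
 */
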
/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
        vm_reserv_t rv;

        mtx_lock(&vm_page_queue_free_mtx);
        while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
                KASSERT(rv->object == object,
                    ("vm_reserv_break_all: reserv %p is corrupted", rv));
                if (rv->inpartpopq) {
                        TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
                        rv->inpartpopq = FALSE;
                }
                vm_reserv_break(rv, NULL);
        }
        mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
        vm_reserv_t rv;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        rv = vm_reserv_from_page(m);
        if (rv->object == NULL)
                return (FALSE);
        vm_reserv_depopulate(rv, m - rv->pages);
        return (TRUE);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
        vm_paddr_t paddr;
        struct vm_phys_seg *seg;
        int segind;

        /*
         * Initialize the reservation array.  Specifically, initialize the
         * "pages" field for every element that has an underlying superpage.
         */
        for (segind = 0; segind < vm_phys_nsegs; segind++) {
                seg = &vm_phys_segs[segind];
                paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
                while (paddr + VM_LEVEL_0_SIZE <= seg->end) {
                        vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT].pages =
                            PHYS_TO_VM_PAGE(paddr);
                        paddr += VM_LEVEL_0_SIZE;
                }
        }
}

/*
 * Returns true if the given page belongs to a reservation and that page is
 * free.  Otherwise, returns false.
 */
bool
vm_reserv_is_page_free(vm_page_t m)
{
        vm_reserv_t rv;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        rv = vm_reserv_from_page(m);
        if (rv->object == NULL)
                return (false);
        return (popmap_is_clear(rv->popmap, m - rv->pages));
}

/*
 * If the given page belongs to a reservation, returns the level of that
 * reservation.  Otherwise, returns -1.
 */
int
vm_reserv_level(vm_page_t m)
{
        vm_reserv_t rv;

        rv = vm_reserv_from_page(m);
        return (rv->object != NULL ? 0 : -1);
}

/*
 * Returns a reservation level if the given page belongs to a fully-populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
        vm_reserv_t rv;

        rv = vm_reserv_from_page(m);
        return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
}

/*
 * Prepare for the reactivation of a cached page.
 *
 * First, suppose that the given page "m" was allocated individually, i.e., not
 * as part of a reservation, and cached.  Then, suppose a reservation
 * containing "m" is allocated by the same object.  Although "m" and the
 * reservation belong to the same object, "m"'s pindex may not match the
 * reservation's.
 *
 * The free page queue must be locked.
 */
boolean_t
vm_reserv_reactivate_page(vm_page_t m)
{
        vm_reserv_t rv;
        int index;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        rv = vm_reserv_from_page(m);
        if (rv->object == NULL)
                return (FALSE);
        KASSERT((m->flags & PG_CACHED) != 0,
            ("vm_reserv_reactivate_page: page %p is not cached", m));
        if (m->object == rv->object &&
            m->pindex - rv->pindex == (index = VM_RESERV_INDEX(m->object,
            m->pindex)))
                vm_reserv_populate(rv, index);
        else {
                KASSERT(rv->inpartpopq,
            ("vm_reserv_reactivate_page: reserv %p's inpartpopq is FALSE",
                    rv));
                TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
                rv->inpartpopq = FALSE;
                /* Don't release "m" to the physical memory allocator. */
                vm_reserv_break(rv, m);
        }
        return (TRUE);
}

/*
 * Breaks the given partially-populated reservation, releasing its cached and
 * free pages to the physical memory allocator.
 *
 * The free page queue lock must be held.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        KASSERT(rv->inpartpopq,
            ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv));
        TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
        rv->inpartpopq = FALSE;
        vm_reserv_break(rv, NULL);
        vm_reserv_reclaimed++;
}

/*
 * Breaks the reservation at the head of the partially-populated reservation
 * queue, releasing its cached and free pages to the physical memory
 * allocator.  Returns TRUE if a reservation is broken and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_inactive(void)
{
        vm_reserv_t rv;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        if ((rv = TAILQ_FIRST(&vm_rvq_partpop)) != NULL) {
                vm_reserv_reclaim(rv);
                return (TRUE);
        }
        return (FALSE);
}

/*
 * Searches the partially-populated reservation queue for the least recently
 * active reservation with unused pages, i.e., cached or free, that satisfy the
 * given request for contiguous physical memory.  If a satisfactory reservation
 * is found, it is broken.  Returns TRUE if a reservation is broken and FALSE
 * otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
        vm_paddr_t pa, size;
        vm_reserv_t rv;
        int hi, i, lo, low_index, next_free;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        if (npages > VM_LEVEL_0_NPAGES - 1)
                return (FALSE);
        size = npages << PAGE_SHIFT;
        TAILQ_FOREACH(rv, &vm_rvq_partpop, partpopq) {
                pa = VM_PAGE_TO_PHYS(&rv->pages[VM_LEVEL_0_NPAGES - 1]);
                if (pa + PAGE_SIZE - size < low) {
                        /* This entire reservation is too low; go to next. */
                        continue;
                }
                pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
                if (pa + size > high) {
                        /* This entire reservation is too high; go to next. */
                        continue;
                }
                if (pa < low) {
                        /* Start the search for free pages at "low". */
                        low_index = (low + PAGE_MASK - pa) >> PAGE_SHIFT;
                        i = low_index / NBPOPMAP;
                        hi = low_index % NBPOPMAP;
                } else
                        i = hi = 0;
                do {
                        /* Find the next free page. */
                        lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
                        while (lo == 0 && ++i < NPOPMAP)
                                lo = ffsl(~rv->popmap[i]);
                        if (i == NPOPMAP)
                                break;
                        /* Convert from ffsl() to ordinary bit numbering. */
                        lo--;
                        next_free = NBPOPMAP * i + lo;
                        pa = VM_PAGE_TO_PHYS(&rv->pages[next_free]);
                        KASSERT(pa >= low,
                            ("vm_reserv_reclaim_contig: pa is too low"));
                        if (pa + size > high) {
                                /* The rest of this reservation is too high. */
                                break;
                        } else if ((pa & (alignment - 1)) != 0 ||
                            ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0) {
                                /*
                                 * The current page doesn't meet the alignment
                                 * and/or boundary requirements.  Continue
                                 * searching this reservation until the rest
                                 * of its free pages are either excluded or
                                 * exhausted.
                                 */
                                hi = lo + 1;
                                if (hi >= NBPOPMAP) {
                                        hi = 0;
                                        i++;
                                }
                                continue;
                        }
                        /* Find the next used page. */
                        hi = ffsl(rv->popmap[i] & ~((1UL << lo) - 1));
                        while (hi == 0 && ++i < NPOPMAP) {
                                if ((NBPOPMAP * i - next_free) * PAGE_SIZE >=
                                    size) {
                                        vm_reserv_reclaim(rv);
                                        return (TRUE);
                                }
                                hi = ffsl(rv->popmap[i]);
                        }
                        /* Convert from ffsl() to ordinary bit numbering. */
                        if (i != NPOPMAP)
                                hi--;
                        if ((NBPOPMAP * i + hi - next_free) * PAGE_SIZE >=
                            size) {
                                vm_reserv_reclaim(rv);
                                return (TRUE);
                        }
                } while (i < NPOPMAP);
        }
        return (FALSE);
}

/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
        vm_reserv_t rv;

        VM_OBJECT_ASSERT_WLOCKED(new_object);
        rv = vm_reserv_from_page(m);
        if (rv->object == old_object) {
                mtx_lock(&vm_page_queue_free_mtx);
                if (rv->object == old_object) {
                        LIST_REMOVE(rv, objq);
                        LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
                        rv->object = new_object;
                        rv->pindex -= old_object_offset;
                }
                mtx_unlock(&vm_page_queue_free_mtx);
        }
}

/*
 * Returns the size (in bytes) of a reservation of the specified level.
 */
int
vm_reserv_size(int level)
{

        switch (level) {
        case 0:
                return (VM_LEVEL_0_SIZE);
        case -1:
                return (PAGE_SIZE);
        default:
                return (0);
        }
}

/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t high_water)
{
        vm_paddr_t new_end;
        size_t size;

        /*
         * Calculate the size (in bytes) of the reservation array.  Round up
         * from "high_water" because every small page is mapped to an element
         * in the reservation array based on its physical address.  Thus, the
         * number of elements in the reservation array can be greater than the
         * number of superpages.
         */
        size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);

        /*
         * Allocate and map the physical memory for the reservation array.  The
         * next available virtual address is returned by reference.
         */
        new_end = end - round_page(size);
        vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
            VM_PROT_READ | VM_PROT_WRITE);
        bzero(vm_reserv_array, size);

        /*
         * Return the next available physical address.
         */
        return (new_end);
}

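/*
 * A sizing example for vm_reserv_startup() (assuming 2MB reservations):
 * with "high_water" at 16GB, the array needs howmany(16GB, 2MB) = 8192
 * elements, and the boot-time allocation above reserves
 * round_page(8192 * sizeof(struct vm_reserv)) bytes of physical memory
 * just below "end".
 */
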
#endif  /* VM_NRESERVLEVEL > 0 */
