FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_page.h


/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_page.h     8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *      Resident memory system definitions.
 */

#ifndef _VM_PAGE_
#define _VM_PAGE_

#include <vm/pmap.h>
#include <vm/_vm_phys.h>

/*
 *      Management of resident (logical) pages.
 *
 *      A small structure is kept for each resident
 *      page, indexed by page number.  Each structure
 *      is an element of several collections:
 *
 *              A radix tree used to quickly
 *              perform object/offset lookups
 *
 *              A list of all pages for a given object,
 *              so they can be quickly deactivated at
 *              time of deallocation.
 *
 *              An ordered list of pages due for pageout.
 *
 *      In addition, the structure contains the object
 *      and offset to which this page belongs (for pageout),
 *      and sundry status bits.
 *
 *      In general, operations on this structure's mutable fields are
 *      synchronized using one of, or a combination of, the locks listed
 *      below.  If a field is annotated with two of these locks then holding
 *      either is sufficient for read access but both are required for write
 *      access.  The queue lock for a page depends on the value of its queue
 *      field and is described in detail below.
 *
 *      The following annotations are possible:
 *      (A) the field must be accessed using atomic(9) and may require
 *          additional synchronization.
 *      (B) the page busy lock.
 *      (C) the field is immutable.
 *      (F) the per-domain lock for the free queues.
 *      (M) Machine dependent, defined by pmap layer.
 *      (O) the object that the page belongs to.
 *      (Q) the page's queue lock.
 *
 *      The busy lock is an embedded reader-writer lock that protects the
 *      page's contents and identity (i.e., its <object, pindex> tuple) as
 *      well as certain valid/dirty modifications.  To avoid bloating the
 *      page structure, the busy lock lacks some of the features available in
 *      the kernel's general-purpose synchronization primitives.  As a result,
 *      busy lock ordering rules are not verified, lock recursion is not
 *      detected, and an attempt to xbusy a busy page or sbusy an xbusy page
 *      will trigger a panic rather than causing the thread to block.
 *      vm_page_sleep_if_busy() can be used to sleep until the page's busy
 *      state changes, after which the caller must re-lookup the page and
 *      re-evaluate its state.  vm_page_busy_acquire() will block until
 *      the lock is acquired.
 *
 *      The valid field is protected by the page busy lock (B) and object
 *      lock (O).  Transitions from invalid to valid are generally done
 *      via I/O or zero filling and do not require the object lock.
 *      These must be protected with the busy lock to prevent page-in or
 *      creation races.  Page invalidation generally happens as a result
 *      of truncate or msync.  When invalidated, pages must not be present
 *      in pmap and must hold the object lock to prevent concurrent
 *      speculative read-only mappings that do not require busy.  I/O
 *      routines may check for validity without a lock if they are prepared
 *      to handle invalidation races with higher level locks (vnode) or are
 *      unconcerned with races so long as they hold a reference to prevent
 *      recycling.  When a valid bit is set while holding a shared busy
 *      lock, (A) atomic operations are used to protect against concurrent
 *      modification.
 *
 *      In contrast, the synchronization of accesses to the page's
 *      dirty field is a mix of machine dependent (M) and busy (B).  In
 *      the machine-independent layer, the page busy must be held to
 *      operate on the field.  However, the pmap layer is permitted to
 *      set all bits within the field without holding that lock.  If the
 *      underlying architecture does not support atomic read-modify-write
 *      operations on the field's type, then the machine-independent
 *      layer uses a 32-bit atomic on the aligned 32-bit word that
 *      contains the dirty field.  In the machine-independent layer,
 *      the implementation of read-modify-write operations on the
 *      field is encapsulated in vm_page_clear_dirty_mask().  An
 *      exclusive busy lock combined with pmap_remove_{write/all}() is the
 *      only way to ensure a page can not become dirty.  I/O generally
 *      removes the page from pmap to ensure exclusive access and atomic
 *      writes.
 *
 *      The ref_count field tracks references to the page.  References that
 *      prevent the page from being reclaimable are called wirings and are
 *      counted in the low bits of ref_count.  The containing object's
 *      reference, if one exists, is counted using the VPRC_OBJREF bit in the
 *      ref_count field.  Additionally, the VPRC_BLOCKED bit is used to
 *      atomically check for wirings and prevent new wirings via
 *      pmap_extract_and_hold().  When a page belongs to an object, it may be
 *      wired only when the object is locked, or the page is busy, or by
 *      pmap_extract_and_hold().  As a result, if the object is locked and the
 *      page is not busy (or is exclusively busied by the current thread), and
 *      the page is unmapped, its wire count will not increase.  The ref_count
 *      field is updated using atomic operations in most cases, except when it
 *      is known that no other references to the page exist, such as in the page
 *      allocator.  A page may be present in the page queues, or even actively
 *      scanned by the page daemon, without an explicitly counted reference.
 *      The page daemon must therefore handle the possibility of a concurrent
 *      free of the page.
 *
 *      The queue state of a page consists of the queue and act_count fields of
 *      its atomically updated state, and the subset of atomic flags specified
 *      by PGA_QUEUE_STATE_MASK.  The queue field contains the page's page queue
 *      index, or PQ_NONE if it does not belong to a page queue.  To modify the
 *      queue field, the page queue lock corresponding to the old value must be
 *      held, unless that value is PQ_NONE, in which case the queue index must
 *      be updated using an atomic RMW operation.  There is one exception to
 *      this rule: the page daemon may transition the queue field from
 *      PQ_INACTIVE to PQ_NONE immediately prior to freeing the page during an
 *      inactive queue scan.  At that point the page is already dequeued and no
 *      other references to that vm_page structure can exist.  The PGA_ENQUEUED
 *      flag, when set, indicates that the page structure is physically inserted
 *      into the queue corresponding to the page's queue index, and may only be
 *      set or cleared with the corresponding page queue lock held.
 *
 *      To avoid contention on page queue locks, page queue operations (enqueue,
 *      dequeue, requeue) are batched using fixed-size per-CPU queues.  A
 *      deferred operation is requested by setting one of the flags in
 *      PGA_QUEUE_OP_MASK and inserting an entry into a batch queue.  When a
 *      queue is full, an attempt to insert a new entry will lock the page
 *      queues and trigger processing of the pending entries.  The
 *      type-stability of vm_page structures is crucial to this scheme since the
 *      processing of entries in a given batch queue may be deferred
 *      indefinitely.  In particular, a page may be freed with pending batch
 *      queue entries.  The page queue operation flags must be set using atomic
 *      RMW operations.
 */
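
/*
 * Illustrative example (editor's sketch, not part of the original header):
 * the usual pattern for operating on a page's contents is to look the page
 * up and busy it first, so that its identity and valid/dirty state cannot
 * change underneath the caller.  The helper below is hypothetical, assumes
 * the caller holds the object's write lock, and is guarded by "#if 0"
 * because it exists purely for exposition.
 */
#if 0
static void
example_dirty_one_page(vm_object_t object, vm_pindex_t pindex)
{
        vm_page_t m;

        m = vm_page_lookup(object, pindex);     /* object lock held */
        if (m == NULL || !vm_page_tryxbusy(m))
                return;                 /* absent or busy; caller may retry */
        vm_page_dirty(m);               /* object lock still held */
        vm_page_xunbusy(m);
}
#endif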

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xffu
typedef uint8_t vm_page_bits_t;
#elif PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffffu
typedef uint16_t vm_page_bits_t;
#elif PAGE_SIZE == 16384
#define VM_PAGE_BITS_ALL 0xffffffffu
typedef uint32_t vm_page_bits_t;
#elif PAGE_SIZE == 32768
#define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
typedef uint64_t vm_page_bits_t;
#endif

typedef union vm_page_astate {
        struct {
                uint16_t flags;
                uint8_t queue;
                uint8_t act_count;
        };
        uint32_t _bits;
} vm_page_astate_t;

struct vm_page {
        union {
                TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
                struct {
                        SLIST_ENTRY(vm_page) ss; /* private slists */
                } s;
                struct {
                        u_long p;
                        u_long v;
                } memguard;
                struct {
                        void *slab;
                        void *zone;
                } uma;
        } plinks;
        TAILQ_ENTRY(vm_page) listq;     /* pages in same object (O) */
        vm_object_t object;             /* which object am I in (O) */
        vm_pindex_t pindex;             /* offset into object (O,P) */
        vm_paddr_t phys_addr;           /* physical address of page (C) */
        struct md_page md;              /* machine dependent stuff */
        u_int ref_count;                /* page references (A) */
        u_int busy_lock;                /* busy owners lock (A) */
        union vm_page_astate a;         /* state accessed atomically (A) */
        uint8_t order;                  /* index of the buddy queue (F) */
        uint8_t pool;                   /* vm_phys freepool index (F) */
        uint8_t flags;                  /* page PG_* flags (P) */
        uint8_t oflags;                 /* page VPO_* flags (O) */
        int8_t psind;                   /* pagesizes[] index (O) */
        int8_t segind;                  /* vm_phys segment index (C) */
        /* NOTE that these must support one bit per DEV_BSIZE in a page */
        /* so, on normal X86 kernels, they must be at least 8 bits wide */
        vm_page_bits_t valid;           /* valid DEV_BSIZE chunk map (O,B) */
        vm_page_bits_t dirty;           /* dirty DEV_BSIZE chunk map (M,B) */
};

/*
 * Special bits used in the ref_count field.
 *
 * ref_count is normally used to count wirings that prevent the page from being
 * reclaimed, but also supports several special types of references that do not
 * prevent reclamation.  Accesses to the ref_count field must be atomic unless
 * the page is unallocated.
 *
 * VPRC_OBJREF is the reference held by the containing object.  It can be set or
 * cleared only when the corresponding object's write lock is held.
 *
 * VPRC_BLOCKED is used to atomically block wirings via pmap lookups while
 * attempting to tear down all mappings of a given page.  The page busy lock and
 * object write lock must both be held in order to set or clear this bit.
 */
#define VPRC_BLOCKED    0x40000000u     /* mappings are being removed */
#define VPRC_OBJREF     0x80000000u     /* object reference, cleared with (O) */
#define VPRC_WIRE_COUNT(c)      ((c) & ~(VPRC_BLOCKED | VPRC_OBJREF))
#define VPRC_WIRE_COUNT_MAX     (~(VPRC_BLOCKED | VPRC_OBJREF))
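
/*
 * Example (illustrative): a ref_count of (VPRC_OBJREF | 2) describes a page
 * that is referenced by its containing object and has two wirings;
 * VPRC_WIRE_COUNT(VPRC_OBJREF | 2) evaluates to 2.
 */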

/*
 * Page flags stored in oflags:
 *
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 *
 * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
 *       indicates that the page is not under PV management but
 *       otherwise should be treated as a normal page.  Pages not
 *       under PV management cannot be paged out via the
 *       object/vm_page_t because there is no knowledge of their pte
 *       mappings, and such pages are also not on any PQ queue.
 *
 */
#define VPO_KMEM_EXEC   0x01            /* kmem mapping allows execution */
#define VPO_SWAPSLEEP   0x02            /* waiting for swap to finish */
#define VPO_UNMANAGED   0x04            /* no PV management for page */
#define VPO_SWAPINPROG  0x08            /* swap I/O in progress on page */

/*
 * Busy page implementation details.
 * The algorithm is taken mostly from the rwlock(9) and sx(9) lock
 * implementations, though support for owner identity is removed because of
 * size constraints.  Checks on lock recursion are therefore not possible,
 * and the effectiveness of the lock assertions is somewhat reduced.
 */
#define VPB_BIT_SHARED          0x01
#define VPB_BIT_EXCLUSIVE       0x02
#define VPB_BIT_WAITERS         0x04
#define VPB_BIT_FLAGMASK                                                \
        (VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)

#define VPB_SHARERS_SHIFT       3
#define VPB_SHARERS(x)                                                  \
        (((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
#define VPB_SHARERS_WORD(x)     ((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
#define VPB_ONE_SHARER          (1 << VPB_SHARERS_SHIFT)

#define VPB_SINGLE_EXCLUSIVE    VPB_BIT_EXCLUSIVE
#ifdef INVARIANTS
#define VPB_CURTHREAD_EXCLUSIVE                                         \
        (VPB_BIT_EXCLUSIVE | ((u_int)(uintptr_t)curthread & ~VPB_BIT_FLAGMASK))
#else
#define VPB_CURTHREAD_EXCLUSIVE VPB_SINGLE_EXCLUSIVE
#endif

#define VPB_UNBUSIED            VPB_SHARERS_WORD(0)

/* Freed lock blocks both shared and exclusive. */
#define VPB_FREED               (0xffffffff - VPB_BIT_SHARED)
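
/*
 * Example (illustrative): with three shared holders the busy word is
 * VPB_SHARERS_WORD(3) == (3 << VPB_SHARERS_SHIFT) | VPB_BIT_SHARED == 0x19,
 * and VPB_SHARERS(0x19) recovers 3.  An unbusied page stores
 * VPB_UNBUSIED == VPB_SHARERS_WORD(0), i.e., just VPB_BIT_SHARED.
 */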

#define PQ_NONE         255
#define PQ_INACTIVE     0
#define PQ_ACTIVE       1
#define PQ_LAUNDRY      2
#define PQ_UNSWAPPABLE  3
#define PQ_COUNT        4

#ifndef VM_PAGE_HAVE_PGLIST
TAILQ_HEAD(pglist, vm_page);
#define VM_PAGE_HAVE_PGLIST
#endif
SLIST_HEAD(spglist, vm_page);

#ifdef _KERNEL
extern vm_page_t bogus_page;
#endif  /* _KERNEL */

extern struct mtx_padalign pa_lock[];

#if defined(__arm__)
#define PDRSHIFT        PDR_SHIFT
#elif !defined(PDRSHIFT)
#define PDRSHIFT        21
#endif

#define pa_index(pa)    ((pa) >> PDRSHIFT)
#define PA_LOCKPTR(pa)  ((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
#define PA_LOCKOBJPTR(pa)       ((struct lock_object *)PA_LOCKPTR((pa)))
#define PA_LOCK(pa)     mtx_lock(PA_LOCKPTR(pa))
#define PA_TRYLOCK(pa)  mtx_trylock(PA_LOCKPTR(pa))
#define PA_UNLOCK(pa)   mtx_unlock(PA_LOCKPTR(pa))
#define PA_UNLOCK_COND(pa)                      \
        do {                                    \
                if ((pa) != 0) {                \
                        PA_UNLOCK((pa));        \
                        (pa) = 0;               \
                }                               \
        } while (0)

#define PA_LOCK_ASSERT(pa, a)   mtx_assert(PA_LOCKPTR(pa), (a))

#if defined(KLD_MODULE) && !defined(KLD_TIED)
#define vm_page_lock(m)         vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
#define vm_page_unlock(m)       vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
#define vm_page_trylock(m)      vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
#else   /* !KLD_MODULE */
#define vm_page_lockptr(m)      (PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
#define vm_page_lock(m)         mtx_lock(vm_page_lockptr((m)))
#define vm_page_unlock(m)       mtx_unlock(vm_page_lockptr((m)))
#define vm_page_trylock(m)      mtx_trylock(vm_page_lockptr((m)))
#endif
#if defined(INVARIANTS)
#define vm_page_assert_locked(m)                \
    vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
#define vm_page_lock_assert(m, a)               \
    vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
#else
#define vm_page_assert_locked(m)
#define vm_page_lock_assert(m, a)
#endif

/*
 * The vm_page's aflags are updated using atomic operations.  To set or clear
 * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
 * must be used.  Neither these flags nor these functions are part of the KBI.
 *
 * PGA_REFERENCED may be cleared only if the page is locked.  It is set by
 * both the MI and MD VM layers.  However, kernel loadable modules should not
 * directly set this flag.  They should call vm_page_reference() instead.
 *
 * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().
 * When it does so, the object must be locked, or the page must be
 * exclusive busied.  The MI VM layer must never access this flag
 * directly.  Instead, it should call pmap_page_is_write_mapped().
 *
 * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
 * at least one executable mapping.  It is not consumed by the MI VM layer.
 *
 * PGA_NOSYNC must be set and cleared with the page busy lock held.
 *
 * PGA_ENQUEUED is set and cleared when a page is inserted into or removed
 * from a page queue, respectively.  It determines whether the plinks.q field
 * of the page is valid.  To set or clear this flag, the page's "queue"
 * field must be a valid queue index, and the corresponding page queue lock
 * must be held.
 *
 * PGA_DEQUEUE is set when the page is scheduled to be dequeued from a page
 * queue, and cleared when the dequeue request is processed.  A page may
 * have PGA_DEQUEUE set and PGA_ENQUEUED cleared, for instance if a dequeue
 * is requested after the page is scheduled to be enqueued but before it is
 * actually inserted into the page queue.
 *
 * PGA_REQUEUE is set when the page is scheduled to be enqueued or requeued
 * in its page queue.
 *
 * PGA_REQUEUE_HEAD is a special flag for enqueuing pages near the head of
 * the inactive queue, thus bypassing LRU.
 *
 * The PGA_DEQUEUE, PGA_REQUEUE and PGA_REQUEUE_HEAD flags must be set using an
 * atomic RMW operation to ensure that the "queue" field is a valid queue index,
 * and the corresponding page queue lock must be held when clearing any of the
 * flags.
 *
 * PGA_SWAP_FREE is used to defer freeing swap space to the pageout daemon
 * when the context that dirties the page does not have the object write lock
 * held.
 */
#define PGA_WRITEABLE   0x0001          /* page may be mapped writeable */
#define PGA_REFERENCED  0x0002          /* page has been referenced */
#define PGA_EXECUTABLE  0x0004          /* page may be mapped executable */
#define PGA_ENQUEUED    0x0008          /* page is enqueued in a page queue */
#define PGA_DEQUEUE     0x0010          /* page is due to be dequeued */
#define PGA_REQUEUE     0x0020          /* page is due to be requeued */
#define PGA_REQUEUE_HEAD 0x0040         /* page requeue should bypass LRU */
#define PGA_NOSYNC      0x0080          /* do not collect for syncer */
#define PGA_SWAP_FREE   0x0100          /* page with swap space was dirtied */
#define PGA_SWAP_SPACE  0x0200          /* page has allocated swap space */

#define PGA_QUEUE_OP_MASK       (PGA_DEQUEUE | PGA_REQUEUE | PGA_REQUEUE_HEAD)
#define PGA_QUEUE_STATE_MASK    (PGA_ENQUEUED | PGA_QUEUE_OP_MASK)

/*
 * Page flags.  Updates to these flags are not synchronized, and thus they must
 * be set during page allocation or free to avoid races.
 *
 * The PG_PCPU_CACHE flag is set at allocation time if the page was
 * allocated from a per-CPU cache.  It is cleared the next time that the
 * page is allocated from the physical memory allocator.
 */
#define PG_PCPU_CACHE   0x01            /* was allocated from per-CPU caches */
#define PG_FICTITIOUS   0x02            /* physical page doesn't exist */
#define PG_ZERO         0x04            /* page is zeroed */
#define PG_MARKER       0x08            /* special queue marker page */
#define PG_NODUMP       0x10            /* don't include this page in a dump */

/*
 * Misc constants.
 */
#define ACT_DECLINE             1
#define ACT_ADVANCE             3
#define ACT_INIT                5
#define ACT_MAX                 64

#ifdef _KERNEL

#include <sys/kassert.h>
#include <machine/atomic.h>

/*
 * Each pageable resident page falls into one of five lists:
 *
 *      free
 *              Available for allocation now.
 *
 *      inactive
 *              Low activity, candidates for reclamation.
 *              This list is approximately LRU ordered.
 *
 *      laundry
 *              This is the list of pages that should be
 *              paged out next.
 *
 *      unswappable
 *              Dirty anonymous pages that cannot be paged
 *              out because no swap device is configured.
 *
 *      active
 *              Pages that are "active", i.e., they have been
 *              recently referenced.
 *
 */

extern vm_page_t vm_page_array;         /* First resident page in table */
extern long vm_page_array_size;         /* number of vm_page_t's */
extern long first_page;                 /* first physical page number */

#define VM_PAGE_TO_PHYS(entry)  ((entry)->phys_addr)

/*
 * PHYS_TO_VM_PAGE() returns the vm_page_t object that represents a memory
 * page to which the given physical address belongs. The correct vm_page_t
 * object is returned for addresses that are not page-aligned.
 */
vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);

/*
 * Page allocation parameters for vm_page for the functions
 * vm_page_alloc(), vm_page_grab(), vm_page_alloc_contig() and
  518  * vm_page_alloc_freelist().  Some functions support only a subset
  519  * of the flags, and ignore others, see the flags legend.
 *
 * The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
 * and the vm_page_grab*() functions.  See these functions for details.
 *
 * Bits 0 - 1 define class.
 * Bits 2 - 15 dedicated for flags.
 * Legend:
 * (a) - vm_page_alloc() supports the flag.
 * (c) - vm_page_alloc_contig() supports the flag.
 * (g) - vm_page_grab() supports the flag.
 * (n) - vm_page_alloc_noobj() and vm_page_alloc_freelist() support the flag.
 * (p) - vm_page_grab_pages() supports the flag.
 * Bits above 15 define the count of additional pages that the caller
 * intends to allocate.
 */
#define VM_ALLOC_NORMAL         0
#define VM_ALLOC_INTERRUPT      1
#define VM_ALLOC_SYSTEM         2
#define VM_ALLOC_CLASS_MASK     3
#define VM_ALLOC_WAITOK         0x0008  /* (acn) Sleep and retry */
#define VM_ALLOC_WAITFAIL       0x0010  /* (acn) Sleep and return error */
#define VM_ALLOC_WIRED          0x0020  /* (acgnp) Allocate a wired page */
#define VM_ALLOC_ZERO           0x0040  /* (acgnp) Allocate a zeroed page */
#define VM_ALLOC_NORECLAIM      0x0080  /* (c) Do not reclaim after failure */
#define VM_ALLOC_AVAIL0         0x0100
#define VM_ALLOC_NOBUSY         0x0200  /* (acgp) Do not excl busy the page */
#define VM_ALLOC_NOCREAT        0x0400  /* (gp) Don't create a page */
#define VM_ALLOC_AVAIL1         0x0800
#define VM_ALLOC_IGN_SBUSY      0x1000  /* (gp) Ignore shared busy flag */
#define VM_ALLOC_NODUMP         0x2000  /* (ag) don't include in dump */
#define VM_ALLOC_SBUSY          0x4000  /* (acgp) Shared busy the page */
#define VM_ALLOC_NOWAIT         0x8000  /* (acgnp) Do not sleep */
#define VM_ALLOC_COUNT_MAX      0xffff
#define VM_ALLOC_COUNT_SHIFT    16
#define VM_ALLOC_COUNT_MASK     (VM_ALLOC_COUNT(VM_ALLOC_COUNT_MAX))
#define VM_ALLOC_COUNT(count)   ({                              \
        KASSERT((count) <= VM_ALLOC_COUNT_MAX,                  \
            ("%s: invalid VM_ALLOC_COUNT value", __func__));    \
        (count) << VM_ALLOC_COUNT_SHIFT;                        \
})
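
/*
 * Example (illustrative): a caller populating an object with a wired,
 * zero-filled page, and hinting that "count" further allocations will
 * follow, might request
 *
 *      m = vm_page_alloc(object, pindex, VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
 *          VM_ALLOC_ZERO | VM_ALLOC_COUNT(count));
 *
 * Note that for vm_page_alloc() VM_ALLOC_ZERO is only a request; the caller
 * must still check PG_ZERO in m->flags before relying on the page's
 * contents being zeroed.
 */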

#ifdef M_NOWAIT
static inline int
malloc2vm_flags(int malloc_flags)
{
        int pflags;

        KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
            (malloc_flags & M_NOWAIT) != 0,
            ("M_USE_RESERVE requires M_NOWAIT"));
        pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
            VM_ALLOC_SYSTEM;
        if ((malloc_flags & M_ZERO) != 0)
                pflags |= VM_ALLOC_ZERO;
        if ((malloc_flags & M_NODUMP) != 0)
                pflags |= VM_ALLOC_NODUMP;
        if ((malloc_flags & M_NOWAIT))
                pflags |= VM_ALLOC_NOWAIT;
        if ((malloc_flags & M_WAITOK))
                pflags |= VM_ALLOC_WAITOK;
        if ((malloc_flags & M_NORECLAIM))
                pflags |= VM_ALLOC_NORECLAIM;
        return (pflags);
}
#endif
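
/*
 * Example (illustrative): malloc2vm_flags(M_NOWAIT | M_ZERO) yields
 * VM_ALLOC_SYSTEM | VM_ALLOC_ZERO | VM_ALLOC_NOWAIT, while an
 * M_USE_RESERVE request maps to the more aggressive VM_ALLOC_INTERRUPT
 * allocation class.
 */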

/*
 * Predicates supported by vm_page_ps_test():
 *
 *      PS_ALL_DIRTY is true only if the entire (super)page is dirty.
 *      However, it can be spuriously false when the (super)page has become
 *      dirty in the pmap but that information has not been propagated to the
 *      machine-independent layer.
 */
#define PS_ALL_DIRTY    0x1
#define PS_ALL_VALID    0x2
#define PS_NONE_BUSY    0x4
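
/*
 * Example (illustrative): vm_page_ps_test(m, PS_ALL_VALID | PS_NONE_BUSY,
 * NULL) reports whether every base page of the superpage containing m is
 * fully valid and unbusied.
 */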

bool vm_page_busy_acquire(vm_page_t m, int allocflags);
void vm_page_busy_downgrade(vm_page_t m);
int vm_page_busy_tryupgrade(vm_page_t m);
bool vm_page_busy_sleep(vm_page_t m, const char *msg, int allocflags);
void vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m,
    vm_pindex_t pindex, const char *wmesg, int allocflags);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);

void vm_page_activate (vm_page_t);
void vm_page_advise(vm_page_t m, int advice);
vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_alloc_domain(vm_object_t, vm_pindex_t, int, int);
vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);
vm_page_t vm_page_alloc_domain_after(vm_object_t, vm_pindex_t, int, int,
    vm_page_t);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
vm_page_t vm_page_alloc_contig_domain(vm_object_t object,
    vm_pindex_t pindex, int domain, int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
vm_page_t vm_page_alloc_freelist(int, int);
vm_page_t vm_page_alloc_freelist_domain(int, int, int);
vm_page_t vm_page_alloc_noobj(int);
vm_page_t vm_page_alloc_noobj_domain(int, int);
vm_page_t vm_page_alloc_noobj_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
vm_page_t vm_page_alloc_noobj_contig_domain(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_grab_unlocked(vm_object_t, vm_pindex_t, int);
int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
    vm_page_t *ma, int count);
int vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
    int allocflags, vm_page_t *ma, int count);
int vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
    int allocflags);
int vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
    vm_pindex_t pindex, int allocflags);
void vm_page_deactivate(vm_page_t);
void vm_page_deactivate_noreuse(vm_page_t);
void vm_page_dequeue(vm_page_t m);
void vm_page_dequeue_deferred(vm_page_t m);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
void vm_page_free_invalid(vm_page_t);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags);
void vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind);
int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_invalid(vm_page_t m);
void vm_page_launder(vm_page_t m);
vm_page_t vm_page_lookup(vm_object_t, vm_pindex_t);
vm_page_t vm_page_lookup_unlocked(vm_object_t, vm_pindex_t);
vm_page_t vm_page_next(vm_page_t m);
void vm_page_pqbatch_drain(void);
void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
bool vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old,
    vm_page_astate_t new);
vm_page_t vm_page_prev(vm_page_t m);
bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
void vm_page_putfake(vm_page_t m);
void vm_page_readahead_finish(vm_page_t m);
bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
bool vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
void vm_page_reference(vm_page_t m);
#define VPR_TRYFREE     0x01
#define VPR_NOREUSE     0x02
void vm_page_release(vm_page_t m, int flags);
void vm_page_release_locked(vm_page_t m, int flags);
vm_page_t vm_page_relookup(vm_object_t, vm_pindex_t);
bool vm_page_remove(vm_page_t);
bool vm_page_remove_xbusy(vm_page_t);
int vm_page_rename(vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_replace(vm_page_t mnew, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mold);
int vm_page_sbusied(vm_page_t m);
vm_page_t vm_page_scan_contig(u_long npages, vm_page_t m_start,
    vm_page_t m_end, u_long alignment, vm_paddr_t boundary, int options);
vm_page_bits_t vm_page_set_dirty(vm_page_t m);
void vm_page_set_valid_range(vm_page_t m, int base, int size);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_sunbusy(vm_page_t m);
bool vm_page_try_remove_all(vm_page_t m);
bool vm_page_try_remove_write(vm_page_t m);
int vm_page_trysbusy(vm_page_t m);
int vm_page_tryxbusy(vm_page_t m);
void vm_page_unhold_pages(vm_page_t *ma, int count);
void vm_page_unswappable(vm_page_t m);
void vm_page_unwire(vm_page_t m, uint8_t queue);
bool vm_page_unwire_noq(vm_page_t m);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire(vm_page_t);
bool vm_page_wire_mapped(vm_page_t m);
void vm_page_xunbusy_hard(vm_page_t m);
void vm_page_xunbusy_hard_unchecked(vm_page_t m);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_clear_dirty(vm_page_t, int, int);
void vm_page_set_invalid(vm_page_t, int, int);
void vm_page_valid(vm_page_t m);
int vm_page_is_valid(vm_page_t, int, int);
void vm_page_test_dirty(vm_page_t);
vm_page_bits_t vm_page_bits(int base, int size);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_pages_toq(struct spglist *free, bool update_wire_count);

void vm_page_dirty_KBI(vm_page_t m);
void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#endif

#define vm_page_busy_fetch(m)   atomic_load_int(&(m)->busy_lock)

#define vm_page_assert_busied(m)                                        \
        KASSERT(vm_page_busied(m),                                      \
            ("vm_page_assert_busied: page %p not busy @ %s:%d", \
            (m), __FILE__, __LINE__))

#define vm_page_assert_sbusied(m)                                       \
        KASSERT(vm_page_sbusied(m),                                     \
            ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
            (m), __FILE__, __LINE__))

#define vm_page_assert_unbusied(m)                                      \
        KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) !=           \
            VPB_CURTHREAD_EXCLUSIVE,                                    \
            ("vm_page_assert_xbusied: page %p busy_lock %#x owned"      \
            " by me @ %s:%d",                                           \
            (m), (m)->busy_lock, __FILE__, __LINE__));                  \

#define vm_page_assert_xbusied_unchecked(m) do {                        \
        KASSERT(vm_page_xbusied(m),                                     \
            ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
            (m), __FILE__, __LINE__));                                  \
} while (0)
#define vm_page_assert_xbusied(m) do {                                  \
        vm_page_assert_xbusied_unchecked(m);                            \
        KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) ==           \
            VPB_CURTHREAD_EXCLUSIVE,                                    \
            ("vm_page_assert_xbusied: page %p busy_lock %#x not owned"  \
            " by me @ %s:%d",                                           \
            (m), (m)->busy_lock, __FILE__, __LINE__));                  \
} while (0)

#define vm_page_busied(m)                                               \
        (vm_page_busy_fetch(m) != VPB_UNBUSIED)

#define vm_page_xbusied(m)                                              \
        ((vm_page_busy_fetch(m) & VPB_SINGLE_EXCLUSIVE) != 0)

#define vm_page_busy_freed(m)                                           \
        (vm_page_busy_fetch(m) == VPB_FREED)

/* Note: page m's lock must not be owned by the caller. */
#define vm_page_xunbusy(m) do {                                         \
        if (!atomic_cmpset_rel_int(&(m)->busy_lock,                     \
            VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))                     \
                vm_page_xunbusy_hard(m);                                \
} while (0)
#define vm_page_xunbusy_unchecked(m) do {                               \
        if (!atomic_cmpset_rel_int(&(m)->busy_lock,                     \
            VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))                     \
                vm_page_xunbusy_hard_unchecked(m);                      \
} while (0)

#ifdef INVARIANTS
void vm_page_object_busy_assert(vm_page_t m);
#define VM_PAGE_OBJECT_BUSY_ASSERT(m)   vm_page_object_busy_assert(m)
void vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits);
#define VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)                           \
        vm_page_assert_pga_writeable(m, bits)
/*
 * Claim ownership of a page's xbusy state.  In non-INVARIANTS kernels this
 * operation is a no-op since ownership is not tracked.  In particular
 * this macro does not provide any synchronization with the previous owner.
 */
#define vm_page_xbusy_claim(m) do {                                     \
        u_int _busy_lock;                                               \
                                                                        \
        vm_page_assert_xbusied_unchecked((m));                          \
        do {                                                            \
                _busy_lock = vm_page_busy_fetch(m);                     \
        } while (!atomic_cmpset_int(&(m)->busy_lock, _busy_lock,        \
            (_busy_lock & VPB_BIT_FLAGMASK) | VPB_CURTHREAD_EXCLUSIVE)); \
} while (0)
#else
#define VM_PAGE_OBJECT_BUSY_ASSERT(m)   (void)0
#define VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)   (void)0
#define vm_page_xbusy_claim(m)
#endif

#if BYTE_ORDER == BIG_ENDIAN
#define VM_PAGE_AFLAG_SHIFT     16
#else
#define VM_PAGE_AFLAG_SHIFT     0
#endif

/*
 *      Load a snapshot of a page's 32-bit atomic state.
 */
static inline vm_page_astate_t
vm_page_astate_load(vm_page_t m)
{
        vm_page_astate_t a;

        a._bits = atomic_load_32(&m->a._bits);
        return (a);
}

/*
 *      Atomically compare and set a page's atomic state.
 */
static inline bool
vm_page_astate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
{

        KASSERT(new.queue == PQ_INACTIVE || (new.flags & PGA_REQUEUE_HEAD) == 0,
            ("%s: invalid head requeue request for page %p", __func__, m));
        KASSERT((new.flags & PGA_ENQUEUED) == 0 || new.queue != PQ_NONE,
            ("%s: setting PGA_ENQUEUED with PQ_NONE in page %p", __func__, m));
        KASSERT(new._bits != old->_bits,
            ("%s: bits are unchanged", __func__));

        return (atomic_fcmpset_32(&m->a._bits, &old->_bits, new._bits) != 0);
}
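
/*
 * Illustrative example (editor's sketch): updates of the atomic state follow
 * a load/retry pattern, reloading the snapshot whenever the compare-and-set
 * fails.  The hypothetical helper below sets act_count to "val" and is
 * guarded by "#if 0" because it exists purely for exposition.
 */
#if 0
static void
example_set_act_count(vm_page_t m, uint8_t val)
{
        vm_page_astate_t new, old;

        old = vm_page_astate_load(m);
        do {
                if (old.act_count == val)
                        break;          /* fcmpset requires a change */
                new = old;
                new.act_count = val;
        } while (!vm_page_astate_fcmpset(m, &old, new));
}
#endif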

/*
 *      Clear the given bits in the specified page.
 */
static inline void
vm_page_aflag_clear(vm_page_t m, uint16_t bits)
{
        uint32_t *addr, val;

        /*
         * Access the whole 32-bit word containing the aflags field with an
         * atomic update.  Parallel non-atomic updates to the other fields
         * within this word are handled properly by the atomic update.
         */
        addr = (void *)&m->a;
        val = bits << VM_PAGE_AFLAG_SHIFT;
        atomic_clear_32(addr, val);
}

/*
 *      Set the given bits in the specified page.
 */
static inline void
vm_page_aflag_set(vm_page_t m, uint16_t bits)
{
        uint32_t *addr, val;

        VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);

        /*
         * Access the whole 32-bit word containing the aflags field with an
         * atomic update.  Parallel non-atomic updates to the other fields
         * within this word are handled properly by the atomic update.
         */
        addr = (void *)&m->a;
        val = bits << VM_PAGE_AFLAG_SHIFT;
        atomic_set_32(addr, val);
}
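
/*
 * Example (illustrative): the pmap layer records a hardware reference bit
 * with vm_page_aflag_set(m, PGA_REFERENCED); the flag is later cleared with
 * vm_page_aflag_clear() once the page daemon has acted on the reference.
 */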

/*
 *      vm_page_dirty:
 *
 *      Set all bits in the page's dirty field.
 *
 *      The object containing the specified page must be locked if the
 *      call is made from the machine-independent layer.
 *
 *      See vm_page_clear_dirty_mask().
 */
static __inline void
vm_page_dirty(vm_page_t m)
{

        /* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
#if (defined(KLD_MODULE) && !defined(KLD_TIED)) || defined(INVARIANTS)
        vm_page_dirty_KBI(m);
#else
        m->dirty = VM_PAGE_BITS_ALL;
#endif
}

/*
 *      vm_page_undirty:
 *
 *      Set page to not be dirty.  Note: does not clear pmap modify bits
 */
static __inline void
vm_page_undirty(vm_page_t m)
{

        VM_PAGE_OBJECT_BUSY_ASSERT(m);
        m->dirty = 0;
}

static inline uint8_t
_vm_page_queue(vm_page_astate_t as)
{

        if ((as.flags & PGA_DEQUEUE) != 0)
                return (PQ_NONE);
        return (as.queue);
}

/*
 *      vm_page_queue:
 *
 *      Return the index of the queue containing m.
 */
static inline uint8_t
vm_page_queue(vm_page_t m)
{

        return (_vm_page_queue(vm_page_astate_load(m)));
}

static inline bool
vm_page_active(vm_page_t m)
{

        return (vm_page_queue(m) == PQ_ACTIVE);
}

static inline bool
vm_page_inactive(vm_page_t m)
{

        return (vm_page_queue(m) == PQ_INACTIVE);
}

static inline bool
vm_page_in_laundry(vm_page_t m)
{
        uint8_t queue;

        queue = vm_page_queue(m);
        return (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE);
}

/*
 *      vm_page_drop:
 *
 *      Release a reference to a page and return the old reference count.
 */
static inline u_int
vm_page_drop(vm_page_t m, u_int val)
{
        u_int old;

        /*
         * Synchronize with vm_page_free_prep(): ensure that all updates to the
         * page structure are visible before it is freed.
         */
        atomic_thread_fence_rel();
        old = atomic_fetchadd_int(&m->ref_count, -val);
        KASSERT(old != VPRC_BLOCKED,
            ("vm_page_drop: page %p has an invalid refcount value", m));
        return (old);
}

/*
 *      vm_page_wired:
 *
 *      Perform a racy check to determine whether a reference prevents the page
 *      from being reclaimable.  If the page's object is locked, and the page is
 *      unmapped and exclusively busied by the current thread, no new wirings
 *      may be created.
 */
static inline bool
vm_page_wired(vm_page_t m)
{

        return (VPRC_WIRE_COUNT(m->ref_count) > 0);
}

static inline bool
vm_page_all_valid(vm_page_t m)
{

        return (m->valid == VM_PAGE_BITS_ALL);
}

static inline bool
vm_page_any_valid(vm_page_t m)
{

        return (m->valid != 0);
}

static inline bool
vm_page_none_valid(vm_page_t m)
{

        return (m->valid == 0);
}

static inline int
vm_page_domain(vm_page_t m)
{
#ifdef NUMA
        int domn, segind;

        segind = m->segind;
        KASSERT(segind < vm_phys_nsegs, ("segind %d m %p", segind, m));
        domn = vm_phys_segs[segind].domain;
        KASSERT(domn >= 0 && domn < vm_ndomains, ("domain %d m %p", domn, m));
        return (domn);
#else
        return (0);
#endif
}

#endif                          /* _KERNEL */
#endif                          /* !_VM_PAGE_ */

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.