FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_page.h


/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	Resident memory system definitions.
 */

#ifndef _VM_PAGE_
#define _VM_PAGE_

#include <vm/pmap.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several collections:
 *
 *		A radix tree used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	In general, operations on this structure's mutable fields are
 *	synchronized using either one of or a combination of the lock on the
 *	object that the page belongs to (O), the pool lock for the page (P),
 *	or the lock for either the free or paging queue (Q).  If a field is
 *	annotated below with two of these locks, then holding either lock is
 *	sufficient for read access, but both locks are required for write
 *	access.
 *
 *	In contrast, the synchronization of accesses to the page's
 *	dirty field is machine dependent (M).  In the
 *	machine-independent layer, the lock on the object that the
 *	page belongs to must be held in order to operate on the field.
 *	However, the pmap layer is permitted to set all bits within
 *	the field without holding that lock.  If the underlying
 *	architecture does not support atomic read-modify-write
 *	operations on the field's type, then the machine-independent
 *	layer uses a 32-bit atomic on the aligned 32-bit word that
 *	contains the dirty field.  In the machine-independent layer,
 *	the implementation of read-modify-write operations on the
 *	field is encapsulated in vm_page_clear_dirty_mask().
 */
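
/*
 * For a concrete instance of the containing-word technique described
 * above, see vm_page_aflag_clear() and vm_page_aflag_set() later in
 * this file: they update the 8-bit aflags field with a 32-bit atomic
 * on the aligned word that holds it, the same technique the comment
 * above attributes to vm_page_clear_dirty_mask() for the dirty field.
 */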

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xffu
typedef uint8_t vm_page_bits_t;
#elif PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffffu
typedef uint16_t vm_page_bits_t;
#elif PAGE_SIZE == 16384
#define VM_PAGE_BITS_ALL 0xffffffffu
typedef uint32_t vm_page_bits_t;
#elif PAGE_SIZE == 32768
#define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
typedef uint64_t vm_page_bits_t;
#endif
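
/*
 * A worked example of the sizing above: the valid and dirty bitmaps
 * carry one bit per DEV_BSIZE (512-byte) chunk of a page, so a
 * 4096-byte page needs 4096 / 512 = 8 bits and fits in a uint8_t,
 * while a 32768-byte page needs 64 bits.  In every case,
 * PAGE_SIZE / DEV_BSIZE == sizeof(vm_page_bits_t) * NBBY.
 */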

struct vm_page {
	union {
		TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
		struct {
			SLIST_ENTRY(vm_page) ss; /* private slists */
			void *pv;
		} s;
		struct {
			u_long p;
			u_long v;
		} memguard;
	} plinks;
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
	vm_object_t object;		/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	u_int wire_count;		/* wired down maps refs (P) */
	volatile u_int busy_lock;	/* busy owners lock */
	uint16_t hold_count;		/* page hold count (P) */
	uint16_t flags;			/* page PG_* flags (P) */
	uint8_t aflags;			/* access is atomic */
	uint8_t oflags;			/* page VPO_* flags (O) */
	uint8_t queue;			/* page queue index (P,Q) */
	int8_t psind;			/* pagesizes[] index (O) */
	int8_t segind;			/* vm_phys segment index */
	uint8_t order;			/* index of the buddy queue */
	uint8_t pool;			/* vm_phys freepool index */
	u_char	act_count;		/* page usage count (P) */
	/* NOTE that these must support one bit per DEV_BSIZE in a page */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
	vm_page_bits_t valid;		/* map of valid DEV_BSIZE chunks (O) */
	vm_page_bits_t dirty;		/* map of dirty DEV_BSIZE chunks (M) */
};

/*
 * Page flags stored in oflags:
 *
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 *
 * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
 *	 indicates that the page is not under PV management but
 *	 otherwise should be treated as a normal page.  Pages not
 *	 under PV management cannot be paged out via the
 *	 object/vm_page_t because there is no knowledge of their pte
 *	 mappings, and such pages are also not on any PQ queue.
 *
 */
#define VPO_UNUSED01	0x01		/* --available-- */
#define VPO_SWAPSLEEP	0x02		/* waiting for swap to finish */
#define VPO_UNMANAGED	0x04		/* no PV management for page */
#define VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */
#define VPO_NOSYNC	0x10		/* do not collect for syncer */

/*
 * Busy page implementation details.
 * The algorithm is taken mostly from the rwlock(9) and sx(9) lock
 * implementations, although the support for owner identity is removed
 * because of size constraints.  Checks on lock recursion are therefore
 * not possible, and the effectiveness of the lock assertions is
 * somewhat reduced.
 */
#define VPB_BIT_SHARED		0x01
#define VPB_BIT_EXCLUSIVE	0x02
#define VPB_BIT_WAITERS		0x04
#define VPB_BIT_FLAGMASK						\
	(VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)

#define VPB_SHARERS_SHIFT	3
#define VPB_SHARERS(x)							\
	(((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
#define VPB_SHARERS_WORD(x)	((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
#define VPB_ONE_SHARER		(1 << VPB_SHARERS_SHIFT)

#define VPB_SINGLE_EXCLUSIVER	VPB_BIT_EXCLUSIVE

#define VPB_UNBUSIED		VPB_SHARERS_WORD(0)
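
/*
 * A minimal sketch of how the VPB_* encoding composes; the real
 * shared-busy code is vm_page_trysbusy() in vm_page.c.  A shared
 * acquisition bumps the sharer count while the word stays in the
 * shared state, and fails if the page is exclusively busied.
 */
#ifdef notyet
static inline int
vpb_trysbusy_sketch(vm_page_t m)
{
	u_int x;

	for (;;) {
		x = m->busy_lock;
		if ((x & VPB_BIT_SHARED) == 0)
			return (0);	/* exclusively busied */
		if (atomic_cmpset_acq_int(&m->busy_lock, x,
		    x + VPB_ONE_SHARER))
			return (1);	/* sharer count incremented */
	}
}
#endif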

#define PQ_NONE		255
#define PQ_INACTIVE	0
#define PQ_ACTIVE	1
#define PQ_LAUNDRY	2
#define PQ_COUNT	3

#ifndef VM_PAGE_HAVE_PGLIST
TAILQ_HEAD(pglist, vm_page);
#define VM_PAGE_HAVE_PGLIST
#endif
SLIST_HEAD(spglist, vm_page);

struct vm_pagequeue {
	struct mtx	pq_mutex;
	struct pglist	pq_pl;
	int		pq_cnt;
	u_int		* const pq_vcnt;
	const char	* const pq_name;
} __aligned(CACHE_LINE_SIZE);

struct vm_domain {
	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
	u_int vmd_page_count;
	u_int vmd_free_count;
	long vmd_segs;	/* bitmask of the segments */
	boolean_t vmd_oom;
	int vmd_oom_seq;
	int vmd_last_active_scan;
	struct vm_page vmd_laundry_marker;
	struct vm_page vmd_marker; /* marker for pagedaemon private use */
	struct vm_page vmd_inacthead; /* marker for LRU-defeating insertions */
};

extern struct vm_domain vm_dom[MAXMEMDOM];

#define vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
#define vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
#define vm_pagequeue_lockptr(pq)	(&(pq)->pq_mutex)
#define vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)

#ifdef _KERNEL
static __inline void
vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
{

#ifdef notyet
	vm_pagequeue_assert_locked(pq);
#endif
	pq->pq_cnt += addend;
	atomic_add_int(pq->pq_vcnt, addend);
}
#define vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
#define vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)
#endif	/* _KERNEL */
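
/*
 * A hedged usage sketch of the helpers above: removal from a page
 * queue happens under the queue mutex, with pq_cnt and the global
 * counter behind pq_vcnt kept in step via vm_pagequeue_cnt_dec().
 */
#ifdef notyet
	vm_pagequeue_lock(pq);
	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_cnt_dec(pq);
	vm_pagequeue_unlock(pq);
#endif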

extern struct mtx_padalign vm_page_queue_free_mtx;
extern struct mtx_padalign pa_lock[];

#if defined(__arm__)
#define PDRSHIFT	PDR_SHIFT
#elif !defined(PDRSHIFT)
#define PDRSHIFT	21
#endif

#define pa_index(pa)	((pa) >> PDRSHIFT)
#define PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
#define PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
#define PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
#define PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
#define PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
#define PA_UNLOCK_COND(pa)			\
	do {					\
		if ((pa) != 0) {		\
			PA_UNLOCK((pa));	\
			(pa) = 0;		\
		}				\
	} while (0)

#define PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))
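
/*
 * A hedged usage sketch (ma and npages are hypothetical): the page
 * locks are a hashed array keyed by physical address, so code walking
 * pages with unrelated physical addresses keeps at most one pa_lock
 * held and relies on PA_UNLOCK_COND() to drop the previous one, with
 * 0 serving as the "nothing locked" sentinel.
 */
#ifdef notyet
	vm_paddr_t locked_pa;
	int i;

	locked_pa = 0;
	for (i = 0; i < npages; i++) {
		PA_UNLOCK_COND(locked_pa);	/* no-op when locked_pa == 0 */
		locked_pa = VM_PAGE_TO_PHYS(ma[i]);
		PA_LOCK(locked_pa);
		/* ... operate on ma[i] under its page lock ... */
	}
	PA_UNLOCK_COND(locked_pa);
#endif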

#ifdef KLD_MODULE
#define vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
#define vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
#define vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
#else	/* !KLD_MODULE */
#define vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
#define vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
#define vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
#define vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
#endif
#if defined(INVARIANTS)
#define vm_page_assert_locked(m)		\
    vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
#define vm_page_lock_assert(m, a)		\
    vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
#else
#define vm_page_assert_locked(m)
#define vm_page_lock_assert(m, a)
#endif

/*
 * The vm_page's aflags are updated using atomic operations.  To set or clear
 * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
 * must be used.  Neither these flags nor these functions are part of the KBI.
 *
 * PGA_REFERENCED may be cleared only if the page is locked.  It is set by
 * both the MI and MD VM layers.  However, kernel loadable modules should not
 * directly set this flag.  They should call vm_page_reference() instead.
 *
 * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().
 * When it does so, the object must be locked, or the page must be
 * exclusive busied.  The MI VM layer must never access this flag
 * directly.  Instead, it should call pmap_page_is_write_mapped().
 *
 * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
 * at least one executable mapping.  It is not consumed by the MI VM layer.
 */
#define PGA_WRITEABLE	0x01		/* page may be mapped writeable */
#define PGA_REFERENCED	0x02		/* page has been referenced */
#define PGA_EXECUTABLE	0x04		/* page may be mapped executable */
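
/*
 * An illustrative sketch of the rules above: a pmap creating a managed,
 * writeable mapping might mark the page referenced and writeable in a
 * single atomic update (with the object locked or the page exclusive
 * busied), whereas a kernel module must go through vm_page_reference():
 *
 *	vm_page_aflag_set(m, PGA_REFERENCED | PGA_WRITEABLE);
 */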

/*
 * Page flags.  If changed at any other time than page allocation or
 * freeing, the modification must be protected by the vm_page lock.
 */
#define PG_FICTITIOUS	0x0004		/* physical page doesn't exist */
#define PG_ZERO		0x0008		/* page is zeroed */
#define PG_MARKER	0x0010		/* special queue marker page */
#define PG_NODUMP	0x0080		/* don't include this page in a dump */
#define PG_UNHOLDFREE	0x0100		/* delayed free of a held page */

/*
 * Misc constants.
 */
#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

#ifdef _KERNEL

#include <sys/systm.h>

#include <machine/atomic.h>

/*
 * Each pageable resident page falls into one of four lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This list is approximately LRU ordered.
 *
 *	laundry
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e., they have been
 *		recently referenced.
 *
 */

extern int vm_page_zero_count;

extern vm_page_t vm_page_array;		/* First resident page in table */
extern long vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

/*
 * PHYS_TO_VM_PAGE() returns the vm_page_t object that represents a memory
 * page to which the given physical address belongs. The correct vm_page_t
 * object is returned for addresses that are not page-aligned.
 */
vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);

/*
 * Page allocation parameters for the functions vm_page_alloc(),
 * vm_page_grab(), vm_page_alloc_contig() and vm_page_alloc_freelist().
 * Some functions support only a subset of the flags and ignore the
 * others; see the flags legend.
 *
 * The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
 * and the vm_page_grab*() functions.  See these functions for details.
 *
 * Bits 0 - 1 define class.
 * Bits 2 - 15 are dedicated to flags.
 * Legend:
 * (a) - vm_page_alloc() supports the flag.
 * (c) - vm_page_alloc_contig() supports the flag.
 * (f) - vm_page_alloc_freelist() supports the flag.
 * (g) - vm_page_grab() supports the flag.
 * (p) - vm_page_grab_pages() supports the flag.
 * Bits above 15 define the count of additional pages that the caller
 * intends to allocate.
 */
#define VM_ALLOC_NORMAL		0
#define VM_ALLOC_INTERRUPT	1
#define VM_ALLOC_SYSTEM		2
#define VM_ALLOC_CLASS_MASK	3
#define VM_ALLOC_WAITOK		0x0008	/* (acf) Sleep and retry */
#define VM_ALLOC_WAITFAIL	0x0010	/* (acf) Sleep and return error */
#define VM_ALLOC_WIRED		0x0020	/* (acfgp) Allocate a wired page */
#define VM_ALLOC_ZERO		0x0040	/* (acfgp) Allocate a prezeroed page */
#define VM_ALLOC_NOOBJ		0x0100	/* (acg) No associated object */
#define VM_ALLOC_NOBUSY		0x0200	/* (acgp) Do not excl busy the page */
#define VM_ALLOC_IFCACHED	0x0400
#define VM_ALLOC_IFNOTCACHED	0x0800
#define VM_ALLOC_IGN_SBUSY	0x1000	/* (gp) Ignore shared busy flag */
#define VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
#define VM_ALLOC_SBUSY		0x4000	/* (acgp) Shared busy the page */
#define VM_ALLOC_NOWAIT		0x8000	/* (acfgp) Do not sleep */
#define VM_ALLOC_COUNT_SHIFT	16
#define VM_ALLOC_COUNT(count)	((count) << VM_ALLOC_COUNT_SHIFT)
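
/*
 * A hedged usage sketch: request a wired, prezeroed page for a locked
 * object at pindex, without sleeping, hinting that about eight more
 * allocations will follow (names other than the flags are hypothetical).
 */
#ifdef notyet
	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO | VM_ALLOC_NOWAIT | VM_ALLOC_COUNT(8));
	if (m == NULL) {
		/* VM_ALLOC_NOWAIT was given, so failure must be handled. */
	}
#endif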

#ifdef M_NOWAIT
static inline int
malloc2vm_flags(int malloc_flags)
{
	int pflags;

	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
	    (malloc_flags & M_NOWAIT) != 0,
	    ("M_USE_RESERVE requires M_NOWAIT"));
	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
	    VM_ALLOC_SYSTEM;
	if ((malloc_flags & M_ZERO) != 0)
		pflags |= VM_ALLOC_ZERO;
	if ((malloc_flags & M_NODUMP) != 0)
		pflags |= VM_ALLOC_NODUMP;
	if ((malloc_flags & M_NOWAIT) != 0)
		pflags |= VM_ALLOC_NOWAIT;
	if ((malloc_flags & M_WAITOK) != 0)
		pflags |= VM_ALLOC_WAITOK;
	return (pflags);
}
#endif

/*
 * Predicates supported by vm_page_ps_test():
 *
 *	PS_ALL_DIRTY is true only if the entire (super)page is dirty.
 *	However, it can be spuriously false when the (super)page has become
 *	dirty in the pmap but that information has not been propagated to the
 *	machine-independent layer.
 */
#define PS_ALL_DIRTY	0x1
#define PS_ALL_VALID	0x2
#define PS_NONE_BUSY	0x4
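
/*
 * A hedged sketch of the intended use (caller context hypothetical):
 * code deciding whether a superpage candidate is mappable might require
 * full validity and no busy pages, passing one of its own pages as
 * skip_m to exempt it from the checks:
 *
 *	if (vm_page_ps_test(m, PS_ALL_VALID | PS_NONE_BUSY, skip_m))
 *		... safe to promote the mapping ...
 */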

void vm_page_busy_downgrade(vm_page_t m);
void vm_page_busy_sleep(vm_page_t m, const char *msg, bool nonshared);
void vm_page_flash(vm_page_t m);
void vm_page_hold(vm_page_t mem);
void vm_page_unhold(vm_page_t mem);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);

void vm_page_activate (vm_page_t);
void vm_page_advise(vm_page_t m, int advice);
vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
vm_page_t vm_page_alloc_freelist(int, int);
bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
void vm_page_change_lock(vm_page_t m, struct mtx **mtx);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
    vm_page_t *ma, int count);
void vm_page_deactivate (vm_page_t);
void vm_page_deactivate_noreuse(vm_page_t);
void vm_page_dequeue(vm_page_t m);
void vm_page_dequeue_locked(vm_page_t m);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
void vm_page_free_phys_pglist(struct pglist *tq);
bool vm_page_free_prep(vm_page_t m, bool pagequeue_locked);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_launder(vm_page_t m);
vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
vm_page_t vm_page_next(vm_page_t m);
int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
struct vm_pagequeue *vm_page_pagequeue(vm_page_t m);
vm_page_t vm_page_prev(vm_page_t m);
bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
void vm_page_putfake(vm_page_t m);
void vm_page_readahead_finish(vm_page_t m);
bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
void vm_page_reference(vm_page_t m);
void vm_page_remove (vm_page_t);
int vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_replace(vm_page_t mnew, vm_object_t object,
    vm_pindex_t pindex);
void vm_page_requeue(vm_page_t m);
void vm_page_requeue_locked(vm_page_t m);
int vm_page_sbusied(vm_page_t m);
vm_page_t vm_page_scan_contig(u_long npages, vm_page_t m_start,
    vm_page_t m_end, u_long alignment, vm_paddr_t boundary, int options);
void vm_page_set_valid_range(vm_page_t m, int base, int size);
int vm_page_sleep_if_busy(vm_page_t m, const char *msg);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_sunbusy(vm_page_t m);
bool vm_page_try_to_free(vm_page_t m);
int vm_page_trysbusy(vm_page_t m);
void vm_page_unhold_pages(vm_page_t *ma, int count);
boolean_t vm_page_unwire(vm_page_t m, uint8_t queue);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire (vm_page_t);
void vm_page_xunbusy_hard(vm_page_t m);
void vm_page_xunbusy_maybelocked(vm_page_t m);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
vm_page_bits_t vm_page_bits(int base, int size);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_zero_idle_wakeup(void);

void vm_page_dirty_KBI(vm_page_t m);
void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#endif

#define vm_page_assert_sbusied(m)					\
	KASSERT(vm_page_sbusied(m),					\
	    ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define vm_page_assert_unbusied(m)					\
	KASSERT(!vm_page_busied(m),					\
	    ("vm_page_assert_unbusied: page %p busy @ %s:%d",		\
	    (m), __FILE__, __LINE__))

#define vm_page_assert_xbusied(m)					\
	KASSERT(vm_page_xbusied(m),					\
	    ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define vm_page_busied(m)						\
	((m)->busy_lock != VPB_UNBUSIED)

#define vm_page_sbusy(m) do {						\
	if (!vm_page_trysbusy(m))					\
		panic("%s: page %p failed shared busying", __func__,	\
		    (m));						\
} while (0)

#define vm_page_tryxbusy(m)						\
	(atomic_cmpset_acq_int(&(m)->busy_lock, VPB_UNBUSIED,		\
	    VPB_SINGLE_EXCLUSIVER))

#define vm_page_xbusied(m)						\
	(((m)->busy_lock & VPB_SINGLE_EXCLUSIVER) != 0)

#define vm_page_xbusy(m) do {						\
	if (!vm_page_tryxbusy(m))					\
		panic("%s: page %p failed exclusive busying", __func__, \
		    (m));						\
} while (0)

/* Note: page m's lock must not be owned by the caller. */
#define vm_page_xunbusy(m) do {						\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_SINGLE_EXCLUSIVER, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard(m);				\
} while (0)

#ifdef INVARIANTS
void vm_page_object_lock_assert(vm_page_t m);
#define VM_PAGE_OBJECT_LOCK_ASSERT(m)	vm_page_object_lock_assert(m)
void vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits);
#define VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)				\
	vm_page_assert_pga_writeable(m, bits)
#else
#define VM_PAGE_OBJECT_LOCK_ASSERT(m)	(void)0
#define VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)	(void)0
#endif

/*
 * We want to use atomic updates for the aflags field, which is 8 bits wide.
 * However, not all architectures support atomic operations on 8-bit
 * destinations.  In order that we can easily use a 32-bit operation, we
 * require that the aflags field be 32-bit aligned.
 */
CTASSERT(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0);

/*
 *	Clear the given bits in the specified page.
 */
static inline void
vm_page_aflag_clear(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	/*
	 * The PGA_REFERENCED flag can only be cleared if the page is locked.
	 */
	if ((bits & PGA_REFERENCED) != 0)
		vm_page_assert_locked(m);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
	    ("vm_page_aflag_clear: aflags is misaligned"));
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= 24;
#endif
	atomic_clear_32(addr, val);
}

/*
 *	Set the given bits in the specified page.
 */
static inline void
vm_page_aflag_set(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
	    ("vm_page_aflag_set: aflags is misaligned"));
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= 24;
#endif
	atomic_set_32(addr, val);
}

/*
 *	vm_page_dirty:
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 */
static __inline void
vm_page_dirty(vm_page_t m)
{

	/* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
#if defined(KLD_MODULE) || defined(INVARIANTS)
	vm_page_dirty_KBI(m);
#else
	m->dirty = VM_PAGE_BITS_ALL;
#endif
}

/*
 *	vm_page_remque:
 *
 *	If the given page is in a page queue, then remove it from that page
 *	queue.
 *
 *	The page must be locked.
 */
static inline void
vm_page_remque(vm_page_t m)
{

	if (m->queue != PQ_NONE)
		vm_page_dequeue(m);
}

/*
 *	vm_page_undirty:
 *
 *	Set page to not be dirty.  Note: does not clear pmap modify bits
 */
static __inline void
vm_page_undirty(vm_page_t m)
{

	VM_PAGE_OBJECT_LOCK_ASSERT(m);
	m->dirty = 0;
}

static inline void
vm_page_replace_checked(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mold)
{
	vm_page_t mret;

	mret = vm_page_replace(mnew, object, pindex);
	KASSERT(mret == mold,
	    ("invalid page replacement, mold=%p, mret=%p", mold, mret));

	/* Unused if !INVARIANTS. */
	(void)mold;
	(void)mret;
}

static inline bool
vm_page_active(vm_page_t m)
{

	return (m->queue == PQ_ACTIVE);
}

static inline bool
vm_page_inactive(vm_page_t m)
{

	return (m->queue == PQ_INACTIVE);
}

static inline bool
vm_page_in_laundry(vm_page_t m)
{

	return (m->queue == PQ_LAUNDRY);
}

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */
