FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_pagequeue.h


    1 /*-
    2  * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
    3  *
    4  * Copyright (c) 1991, 1993
    5  *      The Regents of the University of California.  All rights reserved.
    6  *
    7  * This code is derived from software contributed to Berkeley by
    8  * The Mach Operating System project at Carnegie-Mellon University.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 3. Neither the name of the University nor the names of its contributors
   19  *    may be used to endorse or promote products derived from this software
   20  *    without specific prior written permission.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   32  * SUCH DAMAGE.
   33  *
   34  *      from: @(#)vm_page.h     8.2 (Berkeley) 12/13/93
   35  *
   36  *
   37  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
   38  * All rights reserved.
   39  *
   40  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
   41  *
   42  * Permission to use, copy, modify and distribute this software and
   43  * its documentation is hereby granted, provided that both the copyright
   44  * notice and this permission notice appear in all copies of the
   45  * software, derivative works or modified versions, and any portions
   46  * thereof, and that both notices appear in supporting documentation.
   47  *
   48  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   49  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
   50  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   51  *
   52  * Carnegie Mellon requests users of this software to return to
   53  *
   54  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   55  *  School of Computer Science
   56  *  Carnegie Mellon University
   57  *  Pittsburgh PA 15213-3890
   58  *
   59  * any improvements or extensions that they make and grant Carnegie the
   60  * rights to redistribute these changes.
   61  *
   62  * $FreeBSD$
   63  */
   64 
   65 #ifndef _VM_PAGEQUEUE_
   66 #define _VM_PAGEQUEUE_
   67 
   68 #ifdef _KERNEL
   69 struct vm_pagequeue {
   70         struct mtx      pq_mutex;
   71         struct pglist   pq_pl;
   72         int             pq_cnt;
   73         const char      * const pq_name;
   74         uint64_t        pq_pdpages;
   75 } __aligned(CACHE_LINE_SIZE);
   76 
   77 #ifndef VM_BATCHQUEUE_SIZE
   78 #define VM_BATCHQUEUE_SIZE      7
   79 #endif
   80 
   81 struct vm_batchqueue {
   82         vm_page_t       bq_pa[VM_BATCHQUEUE_SIZE];
   83         int             bq_cnt;
   84 } __aligned(CACHE_LINE_SIZE);
   85 
   86 #include <vm/uma.h>
   87 #include <sys/pidctrl.h>
   88 struct sysctl_oid;
   89 
   90 /*
    91  * One vm_domain per NUMA domain.  Contains pagequeues, free page structures,
   92  * and accounting.
   93  *
   94  * Lock Key:
   95  * f    vmd_free_mtx
   96  * p    vmd_pageout_mtx
   97  * d    vm_domainset_lock
   98  * a    atomic
   99  * c    const after boot
  100  * q    page queue lock
   101  */
  102 struct vm_domain {
  103         struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
  104         struct mtx_padalign vmd_free_mtx;
  105         struct mtx_padalign vmd_pageout_mtx;
  106         struct vm_pgcache {
  107                 int domain;
  108                 int pool;
  109                 uma_zone_t zone;
  110         } vmd_pgcache[VM_NFREEPOOL];
  111         struct vmem *vmd_kernel_arena;  /* (c) per-domain kva R/W arena. */
  112         struct vmem *vmd_kernel_rwx_arena; /* (c) per-domain kva R/W/X arena. */
  113         u_int vmd_domain;               /* (c) Domain number. */
  114         u_int vmd_page_count;           /* (c) Total page count. */
  115         long vmd_segs;                  /* (c) bitmask of the segments */
  116         u_int __aligned(CACHE_LINE_SIZE) vmd_free_count; /* (a,f) free page count */
  117         u_int vmd_pageout_deficit;      /* (a) Estimated number of pages deficit */
  118         uint8_t vmd_pad[CACHE_LINE_SIZE - (sizeof(u_int) * 2)];
  119 
  120         /* Paging control variables, used within single threaded page daemon. */
  121         struct pidctrl vmd_pid;         /* Pageout controller. */
  122         boolean_t vmd_oom;
  123         int vmd_oom_seq;
  124         int vmd_last_active_scan;
  125         struct vm_page vmd_markers[PQ_COUNT]; /* (q) markers for queue scans */
  126         struct vm_page vmd_inacthead; /* marker for LRU-defeating insertions */
  127         struct vm_page vmd_clock[2]; /* markers for active queue scan */
  128 
  129         int vmd_pageout_wanted;         /* (a, p) pageout daemon wait channel */
  130         int vmd_pageout_pages_needed;   /* (d) page daemon waiting for pages? */
  131         bool vmd_minset;                /* (d) Are we in vm_min_domains? */
  132         bool vmd_severeset;             /* (d) Are we in vm_severe_domains? */
  133         enum {
  134                 VM_LAUNDRY_IDLE = 0,
  135                 VM_LAUNDRY_BACKGROUND,
  136                 VM_LAUNDRY_SHORTFALL
  137         } vmd_laundry_request;
  138 
  139         /* Paging thresholds and targets. */
  140         u_int vmd_clean_pages_freed;    /* (q) accumulator for laundry thread */
  141         u_int vmd_background_launder_target; /* (c) */
  142         u_int vmd_free_reserved;        /* (c) pages reserved for deadlock */
  143         u_int vmd_free_target;          /* (c) pages desired free */
   144         u_int vmd_free_min;             /* (c) min pages desired free */
  145         u_int vmd_inactive_target;      /* (c) pages desired inactive */
  146         u_int vmd_pageout_free_min;     /* (c) min pages reserved for kernel */
  147         u_int vmd_pageout_wakeup_thresh;/* (c) min pages to wake pagedaemon */
  148         u_int vmd_interrupt_free_min;   /* (c) reserved pages for int code */
  149         u_int vmd_free_severe;          /* (c) severe page depletion point */
  150 
  151         /* Name for sysctl etc. */
  152         struct sysctl_oid *vmd_oid;
  153         char vmd_name[sizeof(__XSTRING(MAXMEMDOM))];
  154 } __aligned(CACHE_LINE_SIZE);
  155 
  156 extern struct vm_domain vm_dom[MAXMEMDOM];
  157 
  158 #define VM_DOMAIN(n)            (&vm_dom[(n)])
  159 #define VM_DOMAIN_EMPTY(n)      (vm_dom[(n)].vmd_page_count == 0)
  160 
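VM_DOMAIN() and VM_DOMAIN_EMPTY() are how per-domain state is reached from a
domain index.  A minimal usage sketch follows; the helper name and the use of
vm_ndomains (assumed to come from vm/vm_phys.h) as the loop bound are
illustrative assumptions, not part of this header:

/*
 * Sketch only: sum the free-page counts of all populated domains.
 * vm_ndomains is assumed from vm/vm_phys.h; the function name is made
 * up for this example.
 */
static inline u_int
example_total_free_pages(void)
{
	u_int total;
	int i;

	total = 0;
	for (i = 0; i < vm_ndomains; i++) {
		if (VM_DOMAIN_EMPTY(i))
			continue;
		/* vmd_free_count is (a,f): an unlocked read is approximate. */
		total += VM_DOMAIN(i)->vmd_free_count;
	}
	return (total);
}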
  161 #define vm_pagequeue_assert_locked(pq)  mtx_assert(&(pq)->pq_mutex, MA_OWNED)
  162 #define vm_pagequeue_lock(pq)           mtx_lock(&(pq)->pq_mutex)
  163 #define vm_pagequeue_lockptr(pq)        (&(pq)->pq_mutex)
  164 #define vm_pagequeue_trylock(pq)        mtx_trylock(&(pq)->pq_mutex)
  165 #define vm_pagequeue_unlock(pq)         mtx_unlock(&(pq)->pq_mutex)
  166 
  167 #define vm_domain_free_assert_locked(n)                                 \
  168             mtx_assert(vm_domain_free_lockptr((n)), MA_OWNED)
  169 #define vm_domain_free_assert_unlocked(n)                               \
  170             mtx_assert(vm_domain_free_lockptr((n)), MA_NOTOWNED)
  171 #define vm_domain_free_lock(d)                                          \
  172             mtx_lock(vm_domain_free_lockptr((d)))
  173 #define vm_domain_free_lockptr(d)                                       \
  174             (&(d)->vmd_free_mtx)
  175 #define vm_domain_free_trylock(d)                                       \
  176             mtx_trylock(vm_domain_free_lockptr((d)))
  177 #define vm_domain_free_unlock(d)                                        \
  178             mtx_unlock(vm_domain_free_lockptr((d)))
  179 
  180 #define vm_domain_pageout_lockptr(d)                                    \
  181             (&(d)->vmd_pageout_mtx)
  182 #define vm_domain_pageout_assert_locked(n)                              \
  183             mtx_assert(vm_domain_pageout_lockptr((n)), MA_OWNED)
  184 #define vm_domain_pageout_assert_unlocked(n)                            \
  185             mtx_assert(vm_domain_pageout_lockptr((n)), MA_NOTOWNED)
  186 #define vm_domain_pageout_lock(d)                                       \
  187             mtx_lock(vm_domain_pageout_lockptr((d)))
  188 #define vm_domain_pageout_unlock(d)                                     \
  189             mtx_unlock(vm_domain_pageout_lockptr((d)))
  190 
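The per-domain free and pageout mutexes pair up the same way as the page queue
macros above.  A minimal sketch of the lock/assert/unlock pattern; the function
name and placeholder body are assumptions for illustration:

/*
 * Sketch only: typical pairing of the per-domain free-lock macros
 * around a critical section.  The function name is made up for this
 * example.
 */
static inline void
example_domain_free_critical(struct vm_domain *vmd)
{

	vm_domain_free_assert_unlocked(vmd);
	vm_domain_free_lock(vmd);
	/* ... touch state annotated (f) in struct vm_domain here ... */
	vm_domain_free_unlock(vmd);
}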
  191 static __inline void
  192 vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
  193 {
  194 
  195         vm_pagequeue_assert_locked(pq);
  196         pq->pq_cnt += addend;
  197 }
  198 #define vm_pagequeue_cnt_inc(pq)        vm_pagequeue_cnt_add((pq), 1)
  199 #define vm_pagequeue_cnt_dec(pq)        vm_pagequeue_cnt_add((pq), -1)
  200 
  201 static inline void
  202 vm_pagequeue_remove(struct vm_pagequeue *pq, vm_page_t m)
  203 {
  204 
  205         TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
  206         vm_pagequeue_cnt_dec(pq);
  207 }
  208 
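pq_mutex protects both pq_pl and pq_cnt, so the lock macros and the counter
helpers above are meant to be used together.  A minimal sketch of a locked
dequeue; the function name is an assumption for illustration:

/*
 * Sketch only: remove a page from its queue while holding pq_mutex.
 * The function name is made up for this example.
 */
static inline void
example_pagequeue_dequeue(struct vm_pagequeue *pq, vm_page_t m)
{

	vm_pagequeue_lock(pq);
	vm_pagequeue_remove(pq, m);	/* TAILQ_REMOVE + pq_cnt decrement */
	vm_pagequeue_unlock(pq);
}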
  209 static inline void
  210 vm_batchqueue_init(struct vm_batchqueue *bq)
  211 {
  212 
  213         bq->bq_cnt = 0;
  214 }
  215 
  216 static inline bool
  217 vm_batchqueue_insert(struct vm_batchqueue *bq, vm_page_t m)
  218 {
  219 
  220         if (bq->bq_cnt < nitems(bq->bq_pa)) {
  221                 bq->bq_pa[bq->bq_cnt++] = m;
  222                 return (true);
  223         }
  224         return (false);
  225 }
  226 
  227 static inline vm_page_t
  228 vm_batchqueue_pop(struct vm_batchqueue *bq)
  229 {
  230 
  231         if (bq->bq_cnt == 0)
  232                 return (NULL);
  233         return (bq->bq_pa[--bq->bq_cnt]);
  234 }
  235 
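A vm_batchqueue lets a caller stage up to VM_BATCHQUEUE_SIZE pages without
holding any lock and then apply them to a page queue in one critical section.
A minimal sketch of that pattern; the function name and the flush policy are
assumptions for illustration:

/*
 * Sketch only: batch page insertions to amortize pq_mutex acquisitions.
 * Pages are collected lock-free in the vm_batchqueue and appended to the
 * page queue in one critical section once the batch fills up.  Pop order
 * is LIFO, which is acceptable for this sketch.
 */
static inline void
example_batch_enqueue(struct vm_pagequeue *pq, struct vm_batchqueue *bq,
    vm_page_t m)
{
	vm_page_t bm;

	if (vm_batchqueue_insert(bq, m))
		return;			/* batched; no lock taken */

	/* Batch is full: flush it under the queue lock, then retry. */
	vm_pagequeue_lock(pq);
	while ((bm = vm_batchqueue_pop(bq)) != NULL) {
		TAILQ_INSERT_TAIL(&pq->pq_pl, bm, plinks.q);
		vm_pagequeue_cnt_inc(pq);
	}
	vm_pagequeue_unlock(pq);
	(void)vm_batchqueue_insert(bq, m);
}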
  236 void vm_domain_set(struct vm_domain *vmd);
  237 void vm_domain_clear(struct vm_domain *vmd);
  238 int vm_domain_allocate(struct vm_domain *vmd, int req, int npages);
  239 
  240 /*
  241  *      vm_pagequeue_domain:
  242  *
  243  *      Return the memory domain the page belongs to.
  244  */
  245 static inline struct vm_domain *
  246 vm_pagequeue_domain(vm_page_t m)
  247 {
  248 
  249         return (VM_DOMAIN(vm_phys_domain(m)));
  250 }
  251 
  252 /*
   253  * Return the number of pages we need to free up or cache.
  254  * A positive number indicates that we do not have enough free pages.
  255  */
  256 static inline int
  257 vm_paging_target(struct vm_domain *vmd)
  258 {
  259 
  260         return (vmd->vmd_free_target - vmd->vmd_free_count);
  261 }
  262 
  263 /*
  264  * Returns TRUE if the pagedaemon needs to be woken up.
  265  */
  266 static inline int
  267 vm_paging_needed(struct vm_domain *vmd, u_int free_count)
  268 {
  269 
  270         return (free_count < vmd->vmd_pageout_wakeup_thresh);
  271 }
  272 
  273 /*
  274  * Returns TRUE if the domain is below the min paging target.
  275  */
  276 static inline int
  277 vm_paging_min(struct vm_domain *vmd)
  278 {
  279 
  280         return (vmd->vmd_free_min > vmd->vmd_free_count);
  281 }
  282 
  283 /*
  284  * Returns TRUE if the domain is below the severe paging target.
  285  */
  286 static inline int
  287 vm_paging_severe(struct vm_domain *vmd)
  288 {
  289 
  290         return (vmd->vmd_free_severe > vmd->vmd_free_count);
  291 }
  292 
  293 /*
  294  * Return the number of pages we need to launder.
  295  * A positive number indicates that we have a shortfall of clean pages.
  296  */
  297 static inline int
  298 vm_laundry_target(struct vm_domain *vmd)
  299 {
  300 
  301         return (vm_paging_target(vmd));
  302 }
  303 
  304 void pagedaemon_wakeup(int domain);
  305 
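The predicates above are how callers decide whether reclamation is needed:
vm_paging_needed() gates waking the page daemon and vm_paging_target() sizes
the shortfall.  A minimal sketch of a caller; the function name is an
assumption for illustration:

/*
 * Sketch only: consult the paging thresholds after the free count has
 * changed.  The function name is made up for this example.
 */
static inline void
example_check_paging(struct vm_domain *vmd)
{
	int shortage;

	if (vm_paging_needed(vmd, vmd->vmd_free_count))
		pagedaemon_wakeup(vmd->vmd_domain);

	shortage = vm_paging_target(vmd);
	if (shortage > 0) {
		/* The domain is 'shortage' pages below vmd_free_target. */
	}
}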
  306 static inline void
  307 vm_domain_freecnt_inc(struct vm_domain *vmd, int adj)
  308 {
  309         u_int old, new;
  310 
  311         old = atomic_fetchadd_int(&vmd->vmd_free_count, adj);
  312         new = old + adj;
  313         /*
  314          * Only update bitsets on transitions.  Notice we short-circuit the
  315          * rest of the checks if we're above min already.
  316          */
  317         if (old < vmd->vmd_free_min && (new >= vmd->vmd_free_min ||
  318             (old < vmd->vmd_free_severe && new >= vmd->vmd_free_severe) ||
  319             (old < vmd->vmd_pageout_free_min &&
  320             new >= vmd->vmd_pageout_free_min)))
  321                 vm_domain_clear(vmd);
  322 }
  323 
  324 #endif  /* _KERNEL */
  325 #endif                          /* !_VM_PAGEQUEUE_ */
