FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_meter.c


    1 /*-
    2  * Copyright (c) 1982, 1986, 1989, 1993
    3  *      The Regents of the University of California.  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 4. Neither the name of the University nor the names of its contributors
   14  *    may be used to endorse or promote products derived from this software
   15  *    without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  *
   29  *      @(#)vm_meter.c  8.4 (Berkeley) 1/4/94
   30  */
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD$");
   34 
   35 #include <sys/param.h>
   36 #include <sys/systm.h>
   37 #include <sys/kernel.h>
   38 #include <sys/lock.h>
   39 #include <sys/mutex.h>
   40 #include <sys/proc.h>
   41 #include <sys/resource.h>
   42 #include <sys/rwlock.h>
   43 #include <sys/sx.h>
   44 #include <sys/vmmeter.h>
   45 #include <sys/smp.h>
   46 
   47 #include <vm/vm.h>
   48 #include <vm/vm_page.h>
   49 #include <vm/vm_extern.h>
   50 #include <vm/vm_param.h>
   51 #include <vm/pmap.h>
   52 #include <vm/vm_map.h>
   53 #include <vm/vm_object.h>
   54 #include <sys/sysctl.h>
   55 
   56 struct vmmeter cnt;
   57 
   58 SYSCTL_UINT(_vm, VM_V_FREE_MIN, v_free_min,
   59         CTLFLAG_RW, &cnt.v_free_min, 0, "Minimum low-free-pages threshold");
   60 SYSCTL_UINT(_vm, VM_V_FREE_TARGET, v_free_target,
   61         CTLFLAG_RW, &cnt.v_free_target, 0, "Desired free pages");
   62 SYSCTL_UINT(_vm, VM_V_FREE_RESERVED, v_free_reserved,
   63         CTLFLAG_RW, &cnt.v_free_reserved, 0, "Pages reserved for deadlock");
   64 SYSCTL_UINT(_vm, VM_V_INACTIVE_TARGET, v_inactive_target,
   65         CTLFLAG_RW, &cnt.v_inactive_target, 0, "Pages desired inactive");
   66 SYSCTL_UINT(_vm, VM_V_CACHE_MIN, v_cache_min,
   67         CTLFLAG_RW, &cnt.v_cache_min, 0, "Min pages on cache queue");
   68 SYSCTL_UINT(_vm, VM_V_CACHE_MAX, v_cache_max,
   69         CTLFLAG_RW, &cnt.v_cache_max, 0, "Max pages on cache queue");
   70 SYSCTL_UINT(_vm, VM_V_PAGEOUT_FREE_MIN, v_pageout_free_min,
   71         CTLFLAG_RW, &cnt.v_pageout_free_min, 0, "Min pages reserved for kernel");
   72 SYSCTL_UINT(_vm, OID_AUTO, v_free_severe,
   73         CTLFLAG_RW, &cnt.v_free_severe, 0, "Severe page depletion point");
   74 
   75 static int
   76 sysctl_vm_loadavg(SYSCTL_HANDLER_ARGS)
   77 {
   78         
   79 #ifdef SCTL_MASK32
   80         u_int32_t la[4];
   81 
   82         if (req->flags & SCTL_MASK32) {
   83                 la[0] = averunnable.ldavg[0];
   84                 la[1] = averunnable.ldavg[1];
   85                 la[2] = averunnable.ldavg[2];
   86                 la[3] = averunnable.fscale;
   87                 return SYSCTL_OUT(req, la, sizeof(la));
   88         } else
   89 #endif
   90                 return SYSCTL_OUT(req, &averunnable, sizeof(averunnable));
   91 }
   92 SYSCTL_PROC(_vm, VM_LOADAVG, loadavg, CTLTYPE_STRUCT | CTLFLAG_RD |
   93     CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_loadavg, "S,loadavg",
   94     "Machine loadaverage history");
   95 
   96 /*
   97  * This function aims to determine if the object is mapped,
   98  * specifically, if it is referenced by a vm_map_entry.  Because
   99  * objects occasionally acquire transient references that do not
  100  * represent a mapping, the method used here is inexact.  However, it
  101  * has very low overhead and is good enough for the advisory
  102  * vm.vmtotal sysctl.
  103  */
  104 static bool
  105 is_object_active(vm_object_t obj)
  106 {
  107 
  108         return (obj->ref_count > obj->shadow_count);
  109 }
  110 
  111 static int
  112 vmtotal(SYSCTL_HANDLER_ARGS)
  113 {
  114         struct vmtotal total;
  115         vm_object_t object;
  116         struct proc *p;
  117         struct thread *td;
  118 
  119         bzero(&total, sizeof(total));
  120 
  121         /*
  122          * Calculate process statistics.
  123          */
  124         sx_slock(&allproc_lock);
  125         FOREACH_PROC_IN_SYSTEM(p) {
  126                 if ((p->p_flag & P_SYSTEM) != 0)
  127                         continue;
  128                 PROC_LOCK(p);
  129                 if (p->p_state != PRS_NEW) {
  130                         FOREACH_THREAD_IN_PROC(p, td) {
  131                                 thread_lock(td);
  132                                 switch (td->td_state) {
  133                                 case TDS_INHIBITED:
  134                                         if (TD_IS_SWAPPED(td))
  135                                                 total.t_sw++;
  136                                         else if (TD_IS_SLEEPING(td)) {
  137                                                 if (td->td_priority <= PZERO)
  138                                                         total.t_dw++;
  139                                                 else
  140                                                         total.t_sl++;
  141                                                 if (td->td_wchan ==
  142                                                     &cnt.v_free_count)
  143                                                         total.t_pw++;
  144                                         }
  145                                         break;
  146                                 case TDS_CAN_RUN:
  147                                         total.t_sw++;
  148                                         break;
  149                                 case TDS_RUNQ:
  150                                 case TDS_RUNNING:
  151                                         total.t_rq++;
  152                                         break;
  153                                 default:
  154                                         break;
  155                                 }
  156                                 thread_unlock(td);
  157                         }
  158                 }
  159                 PROC_UNLOCK(p);
  160         }
  161         sx_sunlock(&allproc_lock);
  162         /*
  163          * Calculate object memory usage statistics.
  164          */
  165         mtx_lock(&vm_object_list_mtx);
  166         TAILQ_FOREACH(object, &vm_object_list, object_list) {
  167                 /*
  168                  * Perform unsynchronized reads on the object.  In
  169                  * this case, the lack of synchronization should not
  170                  * impair the accuracy of the reported statistics.
  171                  */
  172                 if ((object->flags & OBJ_FICTITIOUS) != 0) {
  173                         /*
  174                          * Devices, like /dev/mem, will badly skew our totals.
  175                          */
  176                         continue;
  177                 }
  178                 if (object->ref_count == 0) {
  179                         /*
  180                          * Also skip unreferenced objects, including
  181                          * vnodes representing mounted file systems.
  182                          */
  183                         continue;
  184                 }
  185                 if (object->ref_count == 1 &&
  186                     (object->flags & OBJ_NOSPLIT) != 0) {
  187                         /*
  188                          * Also skip otherwise unreferenced swap
  189                          * objects backing tmpfs vnodes, and POSIX or
  190                          * SysV shared memory.
  191                          */
  192                         continue;
  193                 }
  194                 total.t_vm += object->size;
  195                 total.t_rm += object->resident_page_count;
  196                 if (is_object_active(object)) {
  197                         total.t_avm += object->size;
  198                         total.t_arm += object->resident_page_count;
  199                 }
  200                 if (object->shadow_count > 1) {
  201                         /* shared object */
  202                         total.t_vmshr += object->size;
  203                         total.t_rmshr += object->resident_page_count;
  204                         if (is_object_active(object)) {
  205                                 total.t_avmshr += object->size;
  206                                 total.t_armshr += object->resident_page_count;
  207                         }
  208                 }
  209         }
  210         mtx_unlock(&vm_object_list_mtx);
  211         total.t_free = cnt.v_free_count + cnt.v_cache_count;
  212         return (sysctl_handle_opaque(oidp, &total, sizeof(total), req));
  213 }
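
The totals computed by the handler above are exported further down as the
opaque vm.vmtotal sysctl ("S,vmtotal").  A minimal userland sketch of reading
it (a hypothetical stand-alone program; it assumes the struct vmtotal layout
in <sys/vmmeter.h> matches the running kernel, and the field widths have
changed across FreeBSD releases, hence the casts):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <stdio.h>

int
main(void)
{
        struct vmtotal t;
        size_t len;

        len = sizeof(t);
        if (sysctlbyname("vm.vmtotal", &t, &len, NULL, 0) == -1) {
                perror("vm.vmtotal");
                return (1);
        }
        /* A few of the fields filled in by vmtotal() above. */
        printf("runnable threads:    %ld\n", (long)t.t_rq);
        printf("total virtual pages: %ld\n", (long)t.t_vm);
        printf("active real pages:   %ld\n", (long)t.t_arm);
        printf("free pages:          %ld\n", (long)t.t_free);
        return (0);
}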
  214 
  215 /*
  216  * vcnt() -     accumulate statistics from all cpus and the global cnt
  217  *              structure.
  218  *
  219  *      The vmmeter structure is now per-cpu as well as global.  Those
  220  *      statistics which can be kept on a per-cpu basis (to avoid cache
  221  *      stalls between cpus) can be moved to the per-cpu vmmeter.  Remaining
  222  *      statistics, such as v_free_reserved, are left in the global
  223  *      structure.
  224  *
  225  * (sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
  226  */
  227 static int
  228 vcnt(SYSCTL_HANDLER_ARGS)
  229 {
  230         int count = *(int *)arg1;
  231         int offset = (char *)arg1 - (char *)&cnt;
  232         int i;
  233 
  234         CPU_FOREACH(i) {
  235                 struct pcpu *pcpu = pcpu_find(i);
  236                 count += *(int *)((char *)&pcpu->pc_cnt + offset);
  237         }
  238         return (SYSCTL_OUT(req, &count, sizeof(int)));
  239 }
  240 
  241 SYSCTL_PROC(_vm, VM_TOTAL, vmtotal, CTLTYPE_OPAQUE|CTLFLAG_RD|CTLFLAG_MPSAFE,
  242     0, sizeof(struct vmtotal), vmtotal, "S,vmtotal", 
  243     "System virtual memory statistics");
  244 SYSCTL_NODE(_vm, OID_AUTO, stats, CTLFLAG_RW, 0, "VM meter stats");
  245 static SYSCTL_NODE(_vm_stats, OID_AUTO, sys, CTLFLAG_RW, 0,
  246         "VM meter sys stats");
  247 static SYSCTL_NODE(_vm_stats, OID_AUTO, vm, CTLFLAG_RW, 0,
  248         "VM meter vm stats");
  249 SYSCTL_NODE(_vm_stats, OID_AUTO, misc, CTLFLAG_RW, 0, "VM meter misc stats");
  250 
  251 #define VM_STATS(parent, var, descr) \
  252         SYSCTL_PROC(parent, OID_AUTO, var, \
  253             CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, &cnt.var, 0, vcnt, \
  254             "IU", descr)
  255 #define VM_STATS_VM(var, descr)         VM_STATS(_vm_stats_vm, var, descr)
  256 #define VM_STATS_SYS(var, descr)        VM_STATS(_vm_stats_sys, var, descr)
  257 
  258 VM_STATS_SYS(v_swtch, "Context switches");
  259 VM_STATS_SYS(v_trap, "Traps");
  260 VM_STATS_SYS(v_syscall, "System calls");
  261 VM_STATS_SYS(v_intr, "Device interrupts");
  262 VM_STATS_SYS(v_soft, "Software interrupts");
  263 VM_STATS_VM(v_vm_faults, "Address memory faults");
  264 VM_STATS_VM(v_io_faults, "Page faults requiring I/O");
  265 VM_STATS_VM(v_cow_faults, "Copy-on-write faults");
  266 VM_STATS_VM(v_cow_optim, "Optimized COW faults");
  267 VM_STATS_VM(v_zfod, "Pages zero-filled on demand");
  268 VM_STATS_VM(v_ozfod, "Optimized zero fill pages");
  269 VM_STATS_VM(v_swapin, "Swap pager pageins");
  270 VM_STATS_VM(v_swapout, "Swap pager pageouts");
  271 VM_STATS_VM(v_swappgsin, "Swap pages swapped in");
  272 VM_STATS_VM(v_swappgsout, "Swap pages swapped out");
  273 VM_STATS_VM(v_vnodein, "Vnode pager pageins");
  274 VM_STATS_VM(v_vnodeout, "Vnode pager pageouts");
  275 VM_STATS_VM(v_vnodepgsin, "Vnode pages paged in");
  276 VM_STATS_VM(v_vnodepgsout, "Vnode pages paged out");
  277 VM_STATS_VM(v_intrans, "In transit page faults");
  278 VM_STATS_VM(v_reactivated, "Pages reactivated from free list");
  279 VM_STATS_VM(v_pdwakeups, "Pagedaemon wakeups");
  280 VM_STATS_VM(v_pdpages, "Pages analyzed by pagedaemon");
  281 VM_STATS_VM(v_tcached, "Total pages cached");
  282 VM_STATS_VM(v_dfree, "Pages freed by pagedaemon");
  283 VM_STATS_VM(v_pfree, "Pages freed by exiting processes");
  284 VM_STATS_VM(v_tfree, "Total pages freed");
  285 VM_STATS_VM(v_page_size, "Page size in bytes");
  286 VM_STATS_VM(v_page_count, "Total number of pages in system");
  287 VM_STATS_VM(v_free_reserved, "Pages reserved for deadlock");
  288 VM_STATS_VM(v_free_target, "Pages desired free");
  289 VM_STATS_VM(v_free_min, "Minimum low-free-pages threshold");
  290 VM_STATS_VM(v_free_count, "Free pages");
  291 VM_STATS_VM(v_wire_count, "Wired pages");
  292 VM_STATS_VM(v_active_count, "Active pages");
  293 VM_STATS_VM(v_inactive_target, "Desired inactive pages");
  294 VM_STATS_VM(v_inactive_count, "Inactive pages");
  295 VM_STATS_VM(v_cache_count, "Pages on cache queue");
  296 VM_STATS_VM(v_cache_min, "Min pages on cache queue");
  297 VM_STATS_VM(v_cache_max, "Max pages on cache queue");
  298 VM_STATS_VM(v_pageout_free_min, "Min pages reserved for kernel");
  299 VM_STATS_VM(v_interrupt_free_min, "Reserved pages for interrupt code");
  300 VM_STATS_VM(v_forks, "Number of fork() calls");
  301 VM_STATS_VM(v_vforks, "Number of vfork() calls");
  302 VM_STATS_VM(v_rforks, "Number of rfork() calls");
  303 VM_STATS_VM(v_kthreads, "Number of fork() calls by kernel");
  304 VM_STATS_VM(v_forkpages, "VM pages affected by fork()");
  305 VM_STATS_VM(v_vforkpages, "VM pages affected by vfork()");
  306 VM_STATS_VM(v_rforkpages, "VM pages affected by rfork()");
  307 VM_STATS_VM(v_kthreadpages, "VM pages affected by fork() by kernel");
  308 
  309 SYSCTL_INT(_vm_stats_misc, OID_AUTO, zero_page_count, CTLFLAG_RD,
  310         &vm_page_zero_count, 0, "Number of zero-ed free pages");

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.