FreeBSD/Linux Kernel Cross Reference
sys/uvm/uvm_meter.c

/*      $NetBSD: uvm_meter.c,v 1.80 2020/06/14 21:41:42 ad Exp $        */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vm_meter.c  8.4 (Berkeley) 1/4/94
 * from: Id: uvm_meter.c,v 1.1.2.1 1997/08/14 19:10:35 chuck Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.80 2020/06/14 21:41:42 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

/*
 * maxslp: maximum time (in seconds) an LWP may sleep before it stops
 * being counted as "sleeping" by uvm_total(); exported below as the
 * vm.maxslp sysctl ("Maximum process sleep time before being swapped").
 */

int maxslp = MAXSLP;    /* patchable ... */
struct loadavg averunnable;

static void uvm_total(struct vmtotal *);

/*
 * sysctl helper routine for the vm.vmmeter node.
 */
static int
sysctl_vm_meter(SYSCTLFN_ARGS)
{
        struct sysctlnode node;
        struct vmtotal vmtotals;

        node = *rnode;
        node.sysctl_data = &vmtotals;
        uvm_total(&vmtotals);

        return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

/*
 * sysctl helper routine for the vm.uvmexp node.
 */
static int
sysctl_vm_uvmexp(SYSCTLFN_ARGS)
{
        struct sysctlnode node;

        uvm_update_uvmexp();

        node = *rnode;
        if (oldlenp)
                node.sysctl_size = uimin(*oldlenp, node.sysctl_size);

        return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

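/*
 * sysctl helper routine for the vm.uvmexp2 node: export the
 * machine-independent struct uvmexp_sysctl, assembled from uvmexp and
 * the per-CPU counters.
 */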
static int
sysctl_vm_uvmexp2(SYSCTLFN_ARGS)
{
        struct sysctlnode node;
        struct uvmexp_sysctl u;
        int active, inactive;

        uvm_estimatepageable(&active, &inactive);

        memset(&u, 0, sizeof(u));

        /* uvm_availmem() will sync the counters if old. */
        u.free = uvm_availmem(true);
        u.pagesize = uvmexp.pagesize;
        u.pagemask = uvmexp.pagemask;
        u.pageshift = uvmexp.pageshift;
        u.npages = uvmexp.npages;
        u.active = active;
        u.inactive = inactive;
        u.paging = uvmexp.paging;
        u.wired = uvmexp.wired;
        u.reserve_pagedaemon = uvmexp.reserve_pagedaemon;
        u.reserve_kernel = uvmexp.reserve_kernel;
        u.freemin = uvmexp.freemin;
        u.freetarg = uvmexp.freetarg;
        u.inactarg = 0; /* unused */
        u.wiredmax = uvmexp.wiredmax;
        u.nswapdev = uvmexp.nswapdev;
        u.swpages = uvmexp.swpages;
        u.swpginuse = uvmexp.swpginuse;
        u.swpgonly = uvmexp.swpgonly;
        u.nswget = uvmexp.nswget;
        u.cpuhit = cpu_count_get(CPU_COUNT_CPUHIT);
        u.cpumiss = cpu_count_get(CPU_COUNT_CPUMISS);
        u.faults = cpu_count_get(CPU_COUNT_NFAULT);
        u.traps = cpu_count_get(CPU_COUNT_NTRAP);
        u.intrs = cpu_count_get(CPU_COUNT_NINTR);
        u.swtch = cpu_count_get(CPU_COUNT_NSWTCH);
        u.softs = cpu_count_get(CPU_COUNT_NSOFT);
        u.syscalls = cpu_count_get(CPU_COUNT_NSYSCALL);
        u.pageins = cpu_count_get(CPU_COUNT_PAGEINS);
        u.pgswapin = 0; /* unused */
        u.pgswapout = uvmexp.pgswapout;
        u.forks = cpu_count_get(CPU_COUNT_FORKS);
        u.forks_ppwait = cpu_count_get(CPU_COUNT_FORKS_PPWAIT);
        u.forks_sharevm = cpu_count_get(CPU_COUNT_FORKS_SHAREVM);
        u.zeroaborts = uvmexp.zeroaborts;
        u.fltnoram = cpu_count_get(CPU_COUNT_FLTNORAM);
        u.fltnoanon = cpu_count_get(CPU_COUNT_FLTNOANON);
        u.fltpgwait = cpu_count_get(CPU_COUNT_FLTPGWAIT);
        u.fltpgrele = cpu_count_get(CPU_COUNT_FLTPGRELE);
        u.fltrelck = cpu_count_get(CPU_COUNT_FLTRELCK);
        u.fltrelckok = cpu_count_get(CPU_COUNT_FLTRELCKOK);
        u.fltanget = cpu_count_get(CPU_COUNT_FLTANGET);
        u.fltanretry = cpu_count_get(CPU_COUNT_FLTANRETRY);
        u.fltamcopy = cpu_count_get(CPU_COUNT_FLTAMCOPY);
        u.fltnamap = cpu_count_get(CPU_COUNT_FLTNAMAP);
        u.fltnomap = cpu_count_get(CPU_COUNT_FLTNOMAP);
        u.fltlget = cpu_count_get(CPU_COUNT_FLTLGET);
        u.fltget = cpu_count_get(CPU_COUNT_FLTGET);
        u.flt_anon = cpu_count_get(CPU_COUNT_FLT_ANON);
        u.flt_acow = cpu_count_get(CPU_COUNT_FLT_ACOW);
        u.flt_obj = cpu_count_get(CPU_COUNT_FLT_OBJ);
        u.flt_prcopy = cpu_count_get(CPU_COUNT_FLT_PRCOPY);
        u.flt_przero = cpu_count_get(CPU_COUNT_FLT_PRZERO);
        u.pdwoke = uvmexp.pdwoke;
        u.pdrevs = uvmexp.pdrevs;
        u.pdfreed = uvmexp.pdfreed;
        u.pdscans = uvmexp.pdscans;
        u.pdanscan = uvmexp.pdanscan;
        u.pdobscan = uvmexp.pdobscan;
        u.pdreact = uvmexp.pdreact;
        u.pdbusy = uvmexp.pdbusy;
        u.pdpageouts = uvmexp.pdpageouts;
        u.pdpending = uvmexp.pdpending;
        u.pddeact = uvmexp.pddeact;
        u.execpages = cpu_count_get(CPU_COUNT_EXECPAGES);
        u.colorhit = cpu_count_get(CPU_COUNT_COLORHIT);
        u.colormiss = cpu_count_get(CPU_COUNT_COLORMISS);
        u.ncolors = uvmexp.ncolors;
        u.bootpages = uvmexp.bootpages;
        u.poolpages = pool_totalpages();
        u.countsyncall = cpu_count_get(CPU_COUNT_SYNC);
        u.anonunknown = cpu_count_get(CPU_COUNT_ANONUNKNOWN);
        u.anonclean = cpu_count_get(CPU_COUNT_ANONCLEAN);
        u.anondirty = cpu_count_get(CPU_COUNT_ANONDIRTY);
        u.fileunknown = cpu_count_get(CPU_COUNT_FILEUNKNOWN);
        u.fileclean = cpu_count_get(CPU_COUNT_FILECLEAN);
        u.filedirty = cpu_count_get(CPU_COUNT_FILEDIRTY);
        u.fltup = cpu_count_get(CPU_COUNT_FLTUP);
        u.fltnoup = cpu_count_get(CPU_COUNT_FLTNOUP);
        u.anonpages = u.anonclean + u.anondirty + u.anonunknown;
        u.filepages = u.fileclean + u.filedirty + u.fileunknown - u.execpages;

        node = *rnode;
        node.sysctl_data = &u;
        node.sysctl_size = sizeof(u);
        if (oldlenp)
                node.sysctl_size = uimin(*oldlenp, node.sysctl_size);
        return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

/*
 * sysctl helper routine for uvm_pctparam.
 */
static int
uvm_sysctlpctparam(SYSCTLFN_ARGS)
{
        int t, error;
        struct sysctlnode node;
        struct uvm_pctparam *pct;

        pct = rnode->sysctl_data;
        t = pct->pct_pct;

        node = *rnode;
        node.sysctl_data = &t;
        error = sysctl_lookup(SYSCTLFN_CALL(&node));
        if (error || newp == NULL)
                return error;

        if (t < 0 || t > 100)
                return EINVAL;

        error = uvm_pctparam_check(pct, t);
        if (error) {
                return error;
        }
        uvm_pctparam_set(pct, t);

        return (0);
}

/*
 * sysctl_vm_setup: create the sysctl vm subtree (UVM's hook into sysctl).
 */
SYSCTL_SETUP(sysctl_vm_setup, "sysctl vm subtree setup")
{

        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT,
                       CTLTYPE_STRUCT, "vmmeter",
                       SYSCTL_DESCR("Simple system-wide virtual memory "
                                    "statistics"),
                       sysctl_vm_meter, 0, NULL, sizeof(struct vmtotal),
                       CTL_VM, VM_METER, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT,
                       CTLTYPE_STRUCT, "loadavg",
                       SYSCTL_DESCR("System load average history"),
                       NULL, 0, &averunnable, sizeof(averunnable),
                       CTL_VM, VM_LOADAVG, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT,
                       CTLTYPE_STRUCT, "uvmexp",
                       SYSCTL_DESCR("Detailed system-wide virtual memory "
                                    "statistics"),
                       sysctl_vm_uvmexp, 0, &uvmexp, sizeof(uvmexp),
                       CTL_VM, VM_UVMEXP, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT,
                       CTLTYPE_STRUCT, "uvmexp2",
                       SYSCTL_DESCR("Detailed system-wide virtual memory "
                                    "statistics (MI)"),
                       sysctl_vm_uvmexp2, 0, NULL, 0,
                       CTL_VM, VM_UVMEXP2, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT, CTLTYPE_INT, "maxslp",
                       SYSCTL_DESCR("Maximum process sleep time before being "
                                    "swapped"),
                       NULL, 0, &maxslp, 0,
                       CTL_VM, VM_MAXSLP, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
                       CTLTYPE_INT, "uspace",
                       SYSCTL_DESCR("Number of bytes allocated for a kernel "
                                    "stack"),
                       NULL, USPACE, NULL, 0,
                       CTL_VM, VM_USPACE, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
                       CTLTYPE_LONG, "minaddress",
                       SYSCTL_DESCR("Minimum user address"),
                       NULL, VM_MIN_ADDRESS, NULL, 0,
                       CTL_VM, VM_MINADDRESS, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
                       CTLTYPE_LONG, "maxaddress",
                       SYSCTL_DESCR("Maximum user address"),
                       NULL, VM_MAX_ADDRESS, NULL, 0,
                       CTL_VM, VM_MAXADDRESS, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT|CTLFLAG_UNSIGNED,
                       CTLTYPE_INT, "guard_size",
                       SYSCTL_DESCR("Guard size of main thread"),
                       NULL, 0, &user_stack_guard_size, 0,
                       CTL_VM, VM_GUARD_SIZE, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT|CTLFLAG_UNSIGNED|CTLFLAG_READWRITE,
                       CTLTYPE_INT, "thread_guard_size",
                       SYSCTL_DESCR("Guard size of other threads"),
                       NULL, 0, &user_thread_stack_guard_size, 0,
                       CTL_VM, VM_THREAD_GUARD_SIZE, CTL_EOL);
#ifdef PMAP_DIRECT
        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
                       CTLTYPE_BOOL, "ubc_direct",
                       SYSCTL_DESCR("Use direct map for UBC I/O"),
                       NULL, 0, &ubc_direct, 0,
                       CTL_VM, CTL_CREATE, CTL_EOL);
#endif

        uvmpdpol_sysctlsetup();
}
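
/*
 * Userland reads these nodes with sysctl(3)/sysctlbyname(3).  A rough
 * sketch of fetching the MI statistics exported by sysctl_vm_uvmexp2()
 * above (illustrative only; the exact headers needed may differ):
 *
 *      struct uvmexp_sysctl u;
 *      size_t len = sizeof(u);
 *
 *      if (sysctlbyname("vm.uvmexp2", &u, &len, NULL, 0) == -1)
 *              err(1, "sysctl vm.uvmexp2");
 *      printf("free pages: %lld\n", (long long)u.free);
 */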

/*
 * uvm_total: calculate the current state of the system.
 */
static void
uvm_total(struct vmtotal *totalp)
{
        struct lwp *l;
#if 0
        struct vm_map_entry *   entry;
        struct vm_map *map;
        int paging;
#endif
        int freepg;
        int active;

        memset(totalp, 0, sizeof *totalp);

        /*
         * calculate process statistics
         */
        mutex_enter(&proc_lock);
        LIST_FOREACH(l, &alllwp, l_list) {
                if (l->l_proc->p_flag & PK_SYSTEM)
                        continue;
                switch (l->l_stat) {
                case 0:
                        continue;

                case LSSLEEP:
                case LSSTOP:
                        if ((l->l_flag & LW_SINTR) == 0) {
                                totalp->t_dw++;
                        } else if (l->l_slptime < maxslp) {
                                totalp->t_sl++;
                        }
                        if (l->l_slptime >= maxslp)
                                continue;
                        break;

                case LSRUN:
                case LSONPROC:
                case LSIDL:
                        totalp->t_rq++;
                        if (l->l_stat == LSIDL)
                                continue;
                        break;
                }
                /*
                 * note active objects
                 */
#if 0
                /*
                 * XXXCDC: BOGUS!  rethink this.  in the mean time
                 * don't do it.
                 */
                paging = 0;
                vm_map_lock(map);
                for (map = &p->p_vmspace->vm_map, entry = map->header.next;
                    entry != &map->header; entry = entry->next) {
                        if (entry->is_a_map || entry->is_sub_map ||
                            entry->object.uvm_obj == NULL)
                                continue;
                        /* XXX how to do this with uvm */
                }
                vm_map_unlock(map);
                if (paging)
                        totalp->t_pw++;
#endif
        }
        mutex_exit(&proc_lock);

        /*
         * Calculate object memory usage statistics.
         */
        freepg = uvm_availmem(true);
        uvm_estimatepageable(&active, NULL);
        totalp->t_free = freepg;
        totalp->t_vm = uvmexp.npages - freepg + uvmexp.swpginuse;
        totalp->t_avm = active + uvmexp.swpginuse;      /* XXX */
        totalp->t_rm = uvmexp.npages - freepg;
        totalp->t_arm = active;
        totalp->t_vmshr = 0;            /* XXX */
        totalp->t_avmshr = 0;           /* XXX */
        totalp->t_rmshr = 0;            /* XXX */
        totalp->t_armshr = 0;           /* XXX */
}

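/*
 * uvm_pctparam helpers: a uvm_pctparam stores a tunable percentage
 * (pct_pct, 0-100) together with a pre-scaled copy (pct_scaled =
 * pct_pct * UVM_PCTPARAM_SCALE / 100) and an optional validation hook
 * (pct_check) that is consulted before a new value is accepted.
 */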
void
uvm_pctparam_set(struct uvm_pctparam *pct, int val)
{

        pct->pct_pct = val;
        pct->pct_scaled = val * UVM_PCTPARAM_SCALE / 100;
}

int
uvm_pctparam_get(struct uvm_pctparam *pct)
{

        return pct->pct_pct;
}

int
uvm_pctparam_check(struct uvm_pctparam *pct, int val)
{

        if (pct->pct_check == NULL) {
                return 0;
        }
        return (*pct->pct_check)(pct, val);
}

void
uvm_pctparam_init(struct uvm_pctparam *pct, int val,
    int (*fn)(struct uvm_pctparam *, int))
{

        pct->pct_check = fn;
        uvm_pctparam_set(pct, val);
}

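/*
 * uvm_pctparam_createsysctlnode: attach a read-write integer node for
 * the given pctparam under CTL_VM, handled by uvm_sysctlpctparam().
 */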
int
uvm_pctparam_createsysctlnode(struct uvm_pctparam *pct, const char *name,
    const char *desc)
{

        return sysctl_createv(NULL, 0, NULL, NULL,
            CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
            CTLTYPE_INT, name, SYSCTL_DESCR(desc),
            uvm_sysctlpctparam, 0, (void *)pct, 0, CTL_VM, CTL_CREATE, CTL_EOL);
}

/*
 * Update uvmexp with aggregate values from the per-CPU counters.
 */
void
uvm_update_uvmexp(void)
{

        /* uvm_availmem() will sync the counters if old. */
        uvmexp.free = (int)uvm_availmem(true);
        uvmexp.cpuhit = (int)cpu_count_get(CPU_COUNT_CPUHIT);
        uvmexp.cpumiss = (int)cpu_count_get(CPU_COUNT_CPUMISS);
        uvmexp.faults = (int)cpu_count_get(CPU_COUNT_NFAULT);
        uvmexp.traps = (int)cpu_count_get(CPU_COUNT_NTRAP);
        uvmexp.intrs = (int)cpu_count_get(CPU_COUNT_NINTR);
        uvmexp.swtch = (int)cpu_count_get(CPU_COUNT_NSWTCH);
        uvmexp.softs = (int)cpu_count_get(CPU_COUNT_NSOFT);
        uvmexp.syscalls = (int)cpu_count_get(CPU_COUNT_NSYSCALL);
        uvmexp.pageins = (int)cpu_count_get(CPU_COUNT_PAGEINS);
        uvmexp.forks = (int)cpu_count_get(CPU_COUNT_FORKS);
        uvmexp.forks_ppwait = (int)cpu_count_get(CPU_COUNT_FORKS_PPWAIT);
        uvmexp.forks_sharevm = (int)cpu_count_get(CPU_COUNT_FORKS_SHAREVM);
        uvmexp.fltnoram = (int)cpu_count_get(CPU_COUNT_FLTNORAM);
        uvmexp.fltnoanon = (int)cpu_count_get(CPU_COUNT_FLTNOANON);
        uvmexp.fltpgwait = (int)cpu_count_get(CPU_COUNT_FLTPGWAIT);
        uvmexp.fltpgrele = (int)cpu_count_get(CPU_COUNT_FLTPGRELE);
        uvmexp.fltrelck = (int)cpu_count_get(CPU_COUNT_FLTRELCK);
        uvmexp.fltrelckok = (int)cpu_count_get(CPU_COUNT_FLTRELCKOK);
        uvmexp.fltanget = (int)cpu_count_get(CPU_COUNT_FLTANGET);
        uvmexp.fltanretry = (int)cpu_count_get(CPU_COUNT_FLTANRETRY);
        uvmexp.fltamcopy = (int)cpu_count_get(CPU_COUNT_FLTAMCOPY);
        uvmexp.fltnamap = (int)cpu_count_get(CPU_COUNT_FLTNAMAP);
        uvmexp.fltnomap = (int)cpu_count_get(CPU_COUNT_FLTNOMAP);
        uvmexp.fltlget = (int)cpu_count_get(CPU_COUNT_FLTLGET);
        uvmexp.fltget = (int)cpu_count_get(CPU_COUNT_FLTGET);
        uvmexp.flt_anon = (int)cpu_count_get(CPU_COUNT_FLT_ANON);
        uvmexp.flt_acow = (int)cpu_count_get(CPU_COUNT_FLT_ACOW);
        uvmexp.flt_obj = (int)cpu_count_get(CPU_COUNT_FLT_OBJ);
        uvmexp.flt_prcopy = (int)cpu_count_get(CPU_COUNT_FLT_PRCOPY);
        uvmexp.flt_przero = (int)cpu_count_get(CPU_COUNT_FLT_PRZERO);
        uvmexp.anonpages = (int)(cpu_count_get(CPU_COUNT_ANONCLEAN) +
            cpu_count_get(CPU_COUNT_ANONDIRTY) +
            cpu_count_get(CPU_COUNT_ANONUNKNOWN));
        uvmexp.filepages = (int)(cpu_count_get(CPU_COUNT_FILECLEAN) +
            cpu_count_get(CPU_COUNT_FILEDIRTY) +
            cpu_count_get(CPU_COUNT_FILEUNKNOWN) -
            cpu_count_get(CPU_COUNT_EXECPAGES));
        uvmexp.execpages = (int)cpu_count_get(CPU_COUNT_EXECPAGES);
        uvmexp.colorhit = (int)cpu_count_get(CPU_COUNT_COLORHIT);
        uvmexp.colormiss = (int)cpu_count_get(CPU_COUNT_COLORMISS);
}
