The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/sys/cpu_data.h

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $NetBSD: cpu_data.h,v 1.52 2020/06/14 21:41:42 ad Exp $ */
    2 
    3 /*-
    4  * Copyright (c) 2004, 2006, 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc.
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   17  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   18  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   19  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   26  * POSSIBILITY OF SUCH DAMAGE.
   27  */
   28 
   29 /*
   30  * based on arch/i386/include/cpu.h:
   31  *      NetBSD: cpu.h,v 1.115 2004/05/16 12:32:53 yamt Exp
   32  */
   33 
   34 #ifndef _SYS_CPU_DATA_H_
   35 #define _SYS_CPU_DATA_H_
   36 
   37 struct callout;
   38 struct lwp;
   39 
   40 #include <sys/sched.h>  /* for schedstate_percpu */
   41 #include <sys/condvar.h>
   42 #include <sys/pcu.h>
   43 #include <sys/percpu_types.h>
   44 #include <sys/queue.h>
   45 #include <sys/kcpuset.h>
   46 #include <sys/ipi.h>
   47 
/*
 * Per-CPU counters.  New elements must be added in blocks of 8.
 *
 * Each value indexes the per-CPU cpu_counts[] array in struct cpu_data
 * below; the trailing "N" comments mark each block-of-8 boundary.
 * The CPU_COUNT__UNUSEDn slots are padding that keeps the boundaries
 * intact and may be recycled for new counters.
 */
enum cpu_count {
        /* Basic system activity: context switches, syscalls, traps,
         * hard/soft interrupts and fork variants (names follow the
         * traditional vmstat/uvmexp counters -- TODO confirm). */
        CPU_COUNT_NSWTCH,               /* 0 */
        CPU_COUNT_NSYSCALL,
        CPU_COUNT_NTRAP,
        CPU_COUNT_NINTR,
        CPU_COUNT_NSOFT,
        CPU_COUNT_FORKS,
        CPU_COUNT_FORKS_PPWAIT,
        CPU_COUNT_FORKS_SHAREVM,
        /* Page allocator statistics: page color and CPU-locality
         * hits/misses, free page count (presumably uvm -- verify). */
        CPU_COUNT_COLORHIT,             /* 8 */
        CPU_COUNT_COLORMISS,
        CPU_COUNT__UNUSED3,
        CPU_COUNT__UNUSED4,
        CPU_COUNT_CPUHIT,
        CPU_COUNT_CPUMISS,
        CPU_COUNT_FREEPAGES,
        CPU_COUNT__UNUSED5,
        /* Page fault handling statistics (the FLT* names mirror the
         * historical uvmexp fault counters -- TODO confirm). */
        CPU_COUNT_PAGEINS,              /* 16 */
        CPU_COUNT_FLTUP,
        CPU_COUNT_FLTNOUP,
        CPU_COUNT_FLTPGWAIT,
        CPU_COUNT_FLTRELCK,
        CPU_COUNT_FLTRELCKOK,
        CPU_COUNT__UNUSED1,
        CPU_COUNT__UNUSED2,
        CPU_COUNT_NFAULT,               /* 24 */
        CPU_COUNT_FLT_ACOW,
        CPU_COUNT_FLT_ANON,
        CPU_COUNT_FLT_OBJ,
        CPU_COUNT_FLT_PRCOPY,
        CPU_COUNT_FLT_PRZERO,
        CPU_COUNT_FLTAMCOPY,
        CPU_COUNT_FLTANGET,
        CPU_COUNT_FLTANRETRY,           /* 32 */
        CPU_COUNT_FLTGET,
        CPU_COUNT_FLTLGET,
        CPU_COUNT_FLTNAMAP,
        CPU_COUNT_FLTNOMAP,
        CPU_COUNT_FLTNOANON,
        CPU_COUNT_FLTNORAM,
        CPU_COUNT_FLTPGRELE,
        /* Page classification by backing object and cleanliness,
         * plus executable pages and sync activity. */
        CPU_COUNT_ANONUNKNOWN,          /* 40 */
        CPU_COUNT_ANONCLEAN,
        CPU_COUNT_ANONDIRTY,
        CPU_COUNT_FILEUNKNOWN,
        CPU_COUNT_FILECLEAN,
        CPU_COUNT_FILEDIRTY,
        CPU_COUNT_EXECPAGES,
        CPU_COUNT_SYNC,
        CPU_COUNT_MAX                   /* 48 */
};
  100 
  101 /*
  102  * MI per-cpu data
  103  *
  104  * this structure is intended to be included in MD cpu_info structure.
  105  *      struct cpu_info {
  106  *              struct cpu_data ci_data;
  107  *      }
  108  *
  109  * note that cpu_data is not expected to contain much data,
  110  * as cpu_info is size-limited on most ports.
  111  */
  112 
  113 struct lockdebug;
  114 
/*
 * CPU topology relationships.  Each value indexes the cpu_sibling[] and
 * cpu_nsibling[] arrays in struct cpu_data below, selecting one of the
 * circular sibling lists maintained per CPU.
 */
enum cpu_rel {
        /*
         * This is a circular list of peer CPUs in the same core (SMT /
         * Hyperthreading).  It always includes the CPU it is referenced
         * from as the last entry.
         */
        CPUREL_CORE,

        /*
         * This is a circular list of peer CPUs in the same physical
         * package.  It always includes the CPU it is referenced from as
         * the last entry.
         */
        CPUREL_PACKAGE,

        /*
         * This is a circular list of the first CPUs in each physical
         * package.  It may or may not include the CPU it is referenced
         * from.
         */
        CPUREL_PACKAGE1ST,

        /* Terminator: number of relationship kinds (array dimension). */
        CPUREL_COUNT
};
  140 
/*
 * MI per-CPU state, embedded as "ci_data" in each port's MD struct
 * cpu_info (see the ci_* accessor macros below).  Field order matters:
 * the layout deliberately separates cache-hot shared data from
 * CPU-private data, so do not reorder members casually.
 */
struct cpu_data {
        /*
         * The first section is likely to be touched by other CPUs -
         * it is cache hot.
         */
        u_int           cpu_index;              /* CPU index */
        lwp_t           *cpu_biglock_wanted;    /* LWP spinning on biglock */
        kcondvar_t      cpu_xcall;              /* cross-call support */
        int             cpu_xcall_pending;      /* cross-call support */
        u_int           cpu_psz_read_depth;     /* pserialize(9) read depth */
        uint32_t        cpu_ipipend[IPI_BITWORDS];      /* pending IPIs */
        struct schedstate_percpu cpu_schedstate; /* scheduler state */

        /* Basic topology information.  May be fake. */
        u_int           cpu_package_id;
        u_int           cpu_core_id;
        u_int           cpu_smt_id;
        u_int           cpu_numa_id;
        bool            cpu_is_slow;
        u_int           cpu_nsibling[CPUREL_COUNT];     /* list lengths, per enum cpu_rel */
        struct cpu_info *cpu_sibling[CPUREL_COUNT];     /* circular lists, per enum cpu_rel */
        struct cpu_info *cpu_package1st;        /* 1st CPU in our package */

        /*
         * This section is mostly CPU-private.  The __aligned(64) on the
         * first member starts it on a fresh boundary (64 bytes assumed
         * to be a cache line) to avoid false sharing with the hot
         * section above.
         */
        lwp_t           *cpu_idlelwp __aligned(64);/* idle lwp */
        void            *cpu_lockstat;          /* lockstat private tables */
        u_int           cpu_biglock_count;      /* # recursive holds */
        u_int           cpu_spin_locks;         /* # of spinlockmgr locks */
        u_int           cpu_simple_locks;       /* # of simple locks held */
        u_int           cpu_spin_locks2;        /* # of spin locks held XXX */
        u_int           cpu_lkdebug_recurse;    /* LOCKDEBUG recursion */
        u_int           cpu_softints;           /* pending (slow) softints */
        struct uvm_cpu  *cpu_uvm;               /* uvm per-cpu data */
        u_int           cpu_faultrng;           /* counter for fault rng */
        void            *cpu_callout;           /* per-CPU callout state */
        void            *cpu_softcpu;           /* soft interrupt table */
        TAILQ_HEAD(,buf) cpu_biodone;           /* finished block xfers */
        percpu_cpu_t    cpu_percpu;             /* per-cpu data */
        struct selcluster *cpu_selcluster;      /* per-CPU select() info */
        void            *cpu_nch;               /* per-cpu vfs_cache data */
        _TAILQ_HEAD(,struct lockdebug,volatile) cpu_ld_locks;/* !: lockdebug */
        __cpu_simple_lock_t cpu_ld_lock;        /* lockdebug */
        uint64_t        cpu_cc_freq;            /* cycle counter frequency */
        int64_t         cpu_cc_skew;            /* counter skew vs cpu0 */
        char            cpu_name[8];            /* eg, "cpu4" */
        kcpuset_t       *cpu_kcpuset;           /* kcpuset_t of this cpu only */
        struct lwp * volatile cpu_pcu_curlwp[PCU_UNIT_COUNT];   /* pcu(9) owners */
        int64_t         cpu_counts[CPU_COUNT_MAX];/* per-CPU counts */
};
  192 
/*
 * Convenience accessors: MD code embeds struct cpu_data as the ci_data
 * member of struct cpu_info, so these let callers write ci->ci_index
 * instead of ci->ci_data.cpu_index.
 */
#define ci_schedstate           ci_data.cpu_schedstate
#define ci_index                ci_data.cpu_index
#define ci_biglock_count        ci_data.cpu_biglock_count
#define ci_biglock_wanted       ci_data.cpu_biglock_wanted
#define ci_cpuname              ci_data.cpu_name
#define ci_spin_locks           ci_data.cpu_spin_locks
#define ci_simple_locks         ci_data.cpu_simple_locks
#define ci_lockstat             ci_data.cpu_lockstat
#define ci_spin_locks2          ci_data.cpu_spin_locks2
#define ci_lkdebug_recurse      ci_data.cpu_lkdebug_recurse
#define ci_pcu_curlwp           ci_data.cpu_pcu_curlwp
#define ci_kcpuset              ci_data.cpu_kcpuset
#define ci_ipipend              ci_data.cpu_ipipend
#define ci_psz_read_depth       ci_data.cpu_psz_read_depth

/* Topology and counter fields, same ci_data indirection as above. */
#define ci_package_id           ci_data.cpu_package_id
#define ci_core_id              ci_data.cpu_core_id
#define ci_smt_id               ci_data.cpu_smt_id
#define ci_numa_id              ci_data.cpu_numa_id
#define ci_is_slow              ci_data.cpu_is_slow
#define ci_nsibling             ci_data.cpu_nsibling
#define ci_sibling              ci_data.cpu_sibling
#define ci_package1st           ci_data.cpu_package1st
#define ci_faultrng             ci_data.cpu_faultrng
#define ci_counts               ci_data.cpu_counts

/* Short names for the most frequently used cpu_counts[] slots. */
#define cpu_nsyscall            cpu_counts[CPU_COUNT_NSYSCALL]
#define cpu_ntrap               cpu_counts[CPU_COUNT_NTRAP]
#define cpu_nswtch              cpu_counts[CPU_COUNT_NSWTCH]
#define cpu_nintr               cpu_counts[CPU_COUNT_NINTR]
#define cpu_nsoft               cpu_counts[CPU_COUNT_NSOFT]
#define cpu_nfault              cpu_counts[CPU_COUNT_NFAULT]
  225 
/*
 * Machine-independent CPU bootstrap and per-CPU attach entry points
 * (implemented elsewhere in the kernel; presumably called by MD code
 * during boot -- TODO confirm call sites).
 */
void    mi_cpu_init(void);
int     mi_cpu_attach(struct cpu_info *);
  228 
  229 /*
  230  * Adjust a count with preemption already disabled.  If the counter being
  231  * adjusted can be updated from interrupt context, SPL must be raised.
  232  */
  233 #define CPU_COUNT(idx, d)                                       \
  234 do {                                                            \
  235         extern bool kpreempt_disabled(void);                    \
  236         KASSERT(kpreempt_disabled());                           \
  237         KASSERT((unsigned)idx < CPU_COUNT_MAX);                 \
  238         curcpu()->ci_counts[(idx)] += (d);                      \
  239 } while (/* CONSTCOND */ 0)
  240 
  241 /*
  242  * Fetch a potentially stale count - cheap, use as often as you like.
  243  */
  244 static inline int64_t
  245 cpu_count_get(enum cpu_count idx)
  246 {
  247         extern int64_t cpu_counts[];
  248         return cpu_counts[idx];
  249 }
  250 
/* Slow-path counter adjust usable without preemption disabled, and a
 * routine to fold the per-CPU counts into the global totals (the bool
 * presumably selects a full vs. partial sync -- TODO confirm). */
void    cpu_count(enum cpu_count, int64_t);
void    cpu_count_sync(bool);
  253 
  254 #endif /* _SYS_CPU_DATA_H_ */

Cache object: 1ef648027e798858ce8162a93af84cc6


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.