FreeBSD/Linux Kernel Cross Reference
sys/sys/smp.h


    1 /*-
    2  * ----------------------------------------------------------------------------
    3  * "THE BEER-WARE LICENSE" (Revision 42):
    4  * <phk@FreeBSD.org> wrote this file.  As long as you retain this notice you
    5  * can do whatever you want with this stuff. If we meet some day, and you think
    6  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
    7  * ----------------------------------------------------------------------------
    8  *
    9  * $FreeBSD: releng/11.2/sys/sys/smp.h 331909 2018-04-03 07:31:22Z avg $
   10  */
   11 
   12 #ifndef _SYS_SMP_H_
   13 #define _SYS_SMP_H_
   14 
   15 #ifdef _KERNEL
   16 
   17 #ifndef LOCORE
   18 
   19 #include <sys/cpuset.h>
   20 #include <sys/queue.h>
   21 
   22 /*
   23  * Types of nodes in the topological tree.
   24  */
   25 typedef enum {
   26         /* No node has this type; can be used in topo API calls. */
   27         TOPO_TYPE_DUMMY,
   28         /* Processing unit aka computing unit aka logical CPU. */
   29         TOPO_TYPE_PU,
   30         /* Physical subdivision of a package. */
   31         TOPO_TYPE_CORE,
   32         /* CPU L1/L2/L3 cache. */
   33         TOPO_TYPE_CACHE,
   34         /* Package aka chip, equivalent to socket. */
   35         TOPO_TYPE_PKG,
   36         /* NUMA node. */
   37         TOPO_TYPE_NODE,
   38         /* Other logical or physical grouping of PUs. */
   39         /* E.g. PUs on the same die, or PUs sharing an FPU. */
   40         TOPO_TYPE_GROUP,
   41         /* The whole system. */
   42         TOPO_TYPE_SYSTEM
   43 } topo_node_type;
   44 
   45 /* Hardware identifier of a topology component. */
   46 typedef unsigned int hwid_t;
   47 /* Logical CPU identifier. */
   48 typedef int cpuid_t;
   49 
   50 /* A node in the topology. */
   51 struct topo_node {
   52         struct topo_node                        *parent;
   53         TAILQ_HEAD(topo_children, topo_node)    children;
   54         TAILQ_ENTRY(topo_node)                  siblings;
   55         cpuset_t                                cpuset;
   56         topo_node_type                          type;
   57         uintptr_t                               subtype;
   58         hwid_t                                  hwid;
   59         cpuid_t                                 id;
   60         int                                     nchildren;
   61         int                                     cpu_count;
   62 };
   63 
   64 /*
   65  * Scheduling topology of a NUMA or SMP system.
   66  *
   67  * The top level topology is an array of pointers to groups.  Each group
   68  * contains a bitmask of cpus in its group or subgroups.  It may also
   69  * contain a pointer to an array of child groups.
   70  *
   71  * The bitmasks at non-leaf groups may be used by consumers who support
   72  * a smaller depth than the hardware provides.
   73  *
   74  * The topology may be omitted by systems where all CPUs are equal.
   75  */
   76 
   77 struct cpu_group {
   78         struct cpu_group *cg_parent;    /* Our parent group. */
   79         struct cpu_group *cg_child;     /* Optional children groups. */
   80         cpuset_t        cg_mask;        /* Mask of cpus in this group. */
   81         int32_t         cg_count;       /* Count of cpus in this group. */
   82         int16_t         cg_children;    /* Number of children groups. */
   83         int8_t          cg_level;       /* Shared cache level. */
   84         int8_t          cg_flags;       /* Traversal modifiers. */
   85 };
   86 
   87 typedef struct cpu_group *cpu_group_t;
   88 
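/*
 * Illustrative sketch, not part of smp.h: one way a consumer could walk
 * the cpu_group tree returned by smp_topo().  It assumes the convention
 * used by the ULE scheduler, where cg_child points to an array of
 * cg_children child groups; cg_walk() is a hypothetical name.
 */
#if 0
static void
cg_walk(const struct cpu_group *cg, int depth)
{
        int i;

        printf("%*sshare level %d: %d CPUs, %d child groups\n",
            depth * 2, "", cg->cg_level, cg->cg_count, cg->cg_children);
        for (i = 0; i < cg->cg_children; i++)
                cg_walk(&cg->cg_child[i], depth + 1);
}
#endif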
   89 /*
   90  * Defines common resources for CPUs in the group.  The highest level
   91  * resource should be used when multiple are shared.
   92  */
   93 #define CG_SHARE_NONE   0
   94 #define CG_SHARE_L1     1
   95 #define CG_SHARE_L2     2
   96 #define CG_SHARE_L3     3
   97 
   98 #define MAX_CACHE_LEVELS        CG_SHARE_L3
   99 
  100 /*
  101  * Behavior modifiers for load balancing and affinity.
  102  */
  103 #define CG_FLAG_HTT     0x01            /* Schedule the alternate core last. */
  104 #define CG_FLAG_SMT     0x02            /* New age htt, less crippled. */
  105 #define CG_FLAG_THREAD  (CG_FLAG_HTT | CG_FLAG_SMT)     /* Any threading. */
  106 
  107 /*
  108  * Convenience routines for building and traversing topologies.
  109  */
  110 #ifdef SMP
  111 void topo_init_node(struct topo_node *node);
  112 void topo_init_root(struct topo_node *root);
  113 struct topo_node * topo_add_node_by_hwid(struct topo_node *parent, int hwid,
  114     topo_node_type type, uintptr_t subtype);
  115 struct topo_node * topo_find_node_by_hwid(struct topo_node *parent, int hwid,
  116     topo_node_type type, uintptr_t subtype);
  117 void topo_promote_child(struct topo_node *child);
  118 struct topo_node * topo_next_node(struct topo_node *top,
  119     struct topo_node *node);
  120 struct topo_node * topo_next_nonchild_node(struct topo_node *top,
  121     struct topo_node *node);
  122 void topo_set_pu_id(struct topo_node *node, cpuid_t id);
  123 int topo_analyze(struct topo_node *topo_root, int all, int *pkg_count,
  124     int *cores_per_pkg, int *thrs_per_core);
  125 
  126 #define TOPO_FOREACH(i, root)   \
  127         for (i = root; i != NULL; i = topo_next_node(root, i))
  128 
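/*
 * Illustrative sketch, not part of smp.h: counting the cores in a
 * topology with TOPO_FOREACH.  topo_root stands for whatever root node
 * the platform code built via topo_init_root() and
 * topo_add_node_by_hwid(); count_cores() is a hypothetical name.
 */
#if 0
static int
count_cores(struct topo_node *topo_root)
{
        struct topo_node *node;
        int ncores;

        ncores = 0;
        TOPO_FOREACH(node, topo_root) {
                if (node->type == TOPO_TYPE_CORE)
                        ncores++;
        }
        return (ncores);
}
#endif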
  129 struct cpu_group *smp_topo(void);
  130 struct cpu_group *smp_topo_alloc(u_int count);
  131 struct cpu_group *smp_topo_none(void);
  132 struct cpu_group *smp_topo_1level(int l1share, int l1count, int l1flags);
  133 struct cpu_group *smp_topo_2level(int l2share, int l2count, int l1share,
  134     int l1count, int l1flags);
  135 struct cpu_group *smp_topo_find(struct cpu_group *top, int cpu);
  136 
  137 extern void (*cpustop_restartfunc)(void);
  138 extern int smp_cpus;
  139 /* The suspend/resume cpusets are x86 only, but minimize ifdefs. */
  140 extern volatile cpuset_t resuming_cpus; /* woken up cpus in suspend pen */
  141 extern volatile cpuset_t started_cpus;  /* cpus to let out of stop pen */
  142 extern volatile cpuset_t stopped_cpus;  /* cpus in stop pen */
  143 extern volatile cpuset_t suspended_cpus; /* cpus [near] sleeping in susp pen */
  144 extern volatile cpuset_t toresume_cpus; /* cpus to let out of suspend pen */
  145 extern cpuset_t hlt_cpus_mask;          /* XXX 'mask' is detail in old impl */
  146 extern cpuset_t logical_cpus_mask;
  147 #endif /* SMP */
  148 
  149 extern u_int mp_maxid;
  150 extern int mp_maxcpus;
  151 extern int mp_ncpus;
  152 extern volatile int smp_started;
  153 
  154 extern cpuset_t all_cpus;
  155 extern cpuset_t cpuset_domain[MAXMEMDOM];       /* CPUs in each NUMA domain. */
  156 
  157 /*
  158  * Macro allowing us to determine whether a CPU is absent at any given
  159  * time, thus permitting us to configure sparse maps of cpuid-dependent
  160  * (per-CPU) structures.
  161  */
  162 #define CPU_ABSENT(x_cpu)       (!CPU_ISSET(x_cpu, &all_cpus))
  163 
  164 /*
  165  * Macros to iterate over non-absent CPUs.  CPU_FOREACH() takes an
  166  * integer iterator and iterates over the available set of CPUs.
  167  * CPU_FIRST() returns the id of the first non-absent CPU.  CPU_NEXT()
  168  * returns the id of the next non-absent CPU.  It will wrap back to
  169  * CPU_FIRST() once the end of the list is reached.  The iterators are
  170  * currently implemented via inline functions.
  171  */
  172 #define CPU_FOREACH(i)                                                  \
  173         for ((i) = 0; (i) <= mp_maxid; (i)++)                           \
  174                 if (!CPU_ABSENT((i)))
  175 
  176 static __inline int
  177 cpu_first(void)
  178 {
  179         int i;
  180 
  181         for (i = 0;; i++)
  182                 if (!CPU_ABSENT(i))
  183                         return (i);
  184 }
  185 
  186 static __inline int
  187 cpu_next(int i)
  188 {
  189 
  190         for (;;) {
  191                 i++;
  192                 if (i > mp_maxid)
  193                         i = 0;
  194                 if (!CPU_ABSENT(i))
  195                         return (i);
  196         }
  197 }
  198 
  199 #define CPU_FIRST()     cpu_first()
  200 #define CPU_NEXT(i)     cpu_next((i))
  201 
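/*
 * Illustrative sketch, not part of smp.h: the usual way to visit every
 * non-absent CPU.  some_percpu_counter is a hypothetical array indexed
 * by CPU id, standing in for whatever per-CPU state a subsystem keeps.
 */
#if 0
static long
sum_percpu_counters(long *some_percpu_counter)
{
        long total;
        int cpu;

        total = 0;
        CPU_FOREACH(cpu)
                total += some_percpu_counter[cpu];
        return (total);
}
#endif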
  202 #ifdef SMP
  203 /*
  204  * Machine dependent functions used to initialize MP support.
  205  *
  206  * The cpu_mp_probe() should check to see if MP support is present and return
  207  * zero if it is not or non-zero if it is.  If MP support is present, then
  208  * cpu_mp_start() will be called so that MP can be enabled.  This function
  209  * should do things such as starting up secondary processors.  It should also
  210  * set up mp_ncpus, all_cpus, and smp_cpus.  It should also ensure that
  211  * smp_started is initialized at the appropriate time.
  212  * Once cpu_mp_start() returns, machine independent MP startup code will be
  213  * executed and a simple message will be output to the console.  Finally,
  214  * cpu_mp_announce() will be called so that machine dependent messages about
  215  * the MP support may be output to the console if desired.
  216  *
  217  * The cpu_setmaxid() function is called very early during the boot process
  218  * so that the MD code may set mp_maxid to provide an upper bound on CPU IDs
  219  * that other subsystems may use.  If a platform is not able to determine
  220  * the exact maximum ID that early, then it may set mp_maxid to MAXCPU - 1.
  221  */
  222 struct thread;
  223 
  224 struct cpu_group *cpu_topo(void);
  225 void    cpu_mp_announce(void);
  226 int     cpu_mp_probe(void);
  227 void    cpu_mp_setmaxid(void);
  228 void    cpu_mp_start(void);
  229 
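/*
 * Illustrative sketch, not part of smp.h: roughly the shape of an MD
 * cpu_mp_probe() answering the question posed above.  Real versions live
 * in each architecture's mp_machdep code and depend on how that platform
 * enumerates CPUs; the names here are placeholders only.
 */
#if 0
static u_int mp_hw_ncpus;       /* Hypothetical: filled in by an earlier firmware/FDT probe. */

int
cpu_mp_probe(void)
{

        /* MP support is only worth enabling if more than one CPU exists. */
        return (mp_hw_ncpus > 1);
}
#endif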
  230 void    forward_signal(struct thread *);
  231 int     restart_cpus(cpuset_t);
  232 int     stop_cpus(cpuset_t);
  233 int     stop_cpus_hard(cpuset_t);
  234 #if defined(__amd64__) || defined(__i386__)
  235 int     suspend_cpus(cpuset_t);
  236 int     resume_cpus(cpuset_t);
  237 #endif
  238 
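/*
 * Illustrative sketch, not part of smp.h: parking a set of CPUs in the
 * stop pen around a critical operation and releasing them afterwards.
 * The caller must leave the current CPU out of the mask and must think
 * about held locks; run_with_cpus_stopped() is a hypothetical name.
 */
#if 0
static void
run_with_cpus_stopped(cpuset_t mask, void (*fn)(void *), void *arg)
{

        stop_cpus(mask);        /* Spins until every CPU in mask is stopped. */
        fn(arg);
        restart_cpus(mask);     /* Lets them out of the stop pen again. */
}
#endif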
  239 void    smp_rendezvous_action(void);
  240 extern  struct mtx smp_ipi_mtx;
  241 
  242 #endif /* SMP */
  243 
  244 int     quiesce_all_cpus(const char *, int);
  245 int     quiesce_cpus(cpuset_t, const char *, int);
  246 /*
  247  * smp_no_rendevous_barrier was renamed to smp_no_rendezvous_barrier
  248  * in __FreeBSD_version 1101508, with the old name remaining in 11.x
  249  * as an alias for compatibility.  The old name will be gone in 12.0
  250  * (__FreeBSD_version >= 1200028).
  251  */
  252 void    smp_no_rendevous_barrier(void *);
  253 void    smp_no_rendezvous_barrier(void *);
  254 void    smp_rendezvous(void (*)(void *), 
  255                        void (*)(void *),
  256                        void (*)(void *),
  257                        void *arg);
  258 void    smp_rendezvous_cpus(cpuset_t,
  259                        void (*)(void *), 
  260                        void (*)(void *),
  261                        void (*)(void *),
  262                        void *arg);
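/*
 * Illustrative sketch, not part of smp.h: running a handler on every CPU
 * with smp_rendezvous().  Passing smp_no_rendezvous_barrier for the setup
 * and teardown phases skips the barriers around those phases;
 * flush_local_state() is a hypothetical per-CPU handler.
 */
#if 0
static void
flush_local_state(void *arg __unused)
{

        /* Per-CPU work, e.g. invalidating a local cache, would go here. */
}

static void
flush_all_cpus(void)
{

        smp_rendezvous(smp_no_rendezvous_barrier, flush_local_state,
            smp_no_rendezvous_barrier, NULL);
}
#endif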
  263 #endif /* !LOCORE */
  264 #endif /* _KERNEL */
  265 #endif /* _SYS_SMP_H_ */
