sys/kern/subr_smp.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and machine independent functions
 * used for the kernel SMP support.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/smp.h>

#include "opt_sched.h"

#ifdef SMP
MALLOC_DEFINE(M_TOPO, "toponodes", "SMP topology data");

volatile cpuset_t stopped_cpus;
volatile cpuset_t started_cpus;
volatile cpuset_t suspended_cpus;
cpuset_t hlt_cpus_mask;
cpuset_t logical_cpus_mask;

void (*cpustop_restartfunc)(void);
#endif

static int sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS);

/* This is used in modules that need to work in both SMP and UP. */
cpuset_t all_cpus;

int mp_ncpus;
/* export this for libkvm consumers. */
int mp_maxcpus = MAXCPU;

volatile int smp_started;
u_int mp_maxid;

static SYSCTL_NODE(_kern, OID_AUTO, smp,
    CTLFLAG_RD | CTLFLAG_CAPRD | CTLFLAG_MPSAFE, NULL,
    "Kernel SMP");

SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
    "Max CPU ID.");

SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
    0, "Max number of CPUs that the system was compiled for.");

SYSCTL_PROC(_kern_smp, OID_AUTO, active, CTLFLAG_RD|CTLTYPE_INT|CTLFLAG_MPSAFE,
    NULL, 0, sysctl_kern_smp_active, "I",
    "Indicates system is running in SMP mode");

int smp_disabled = 0;   /* has SMP been disabled? */
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
    &smp_disabled, 0, "SMP has been disabled from the loader");

int smp_cpus = 1;       /* how many CPUs are running */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
    "Number of CPUs online");

int smp_threads_per_core = 1;   /* how many SMT threads are running per core */
SYSCTL_INT(_kern_smp, OID_AUTO, threads_per_core, CTLFLAG_RD|CTLFLAG_CAPRD,
    &smp_threads_per_core, 0, "Number of SMT threads online per core");

int mp_ncores = -1;     /* how many physical cores running */
SYSCTL_INT(_kern_smp, OID_AUTO, cores, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_ncores, 0,
    "Number of physical cores online");

int smp_topology = 0;   /* Which topology we're using. */
SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RDTUN, &smp_topology, 0,
    "Topology override setting; 0 is default provided by hardware.");

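/*
 * Editor's illustrative sketch (not part of the original file): the sysctls
 * above are usually consumed from userland via sysctl(3).  A minimal
 * userland program, assuming only the standard sysctlbyname(3) interface,
 * might read them as follows.
 */
#if 0	/* userland example, not compiled with the kernel */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int maxid, ncpus, active;
	size_t len;

	len = sizeof(maxid);
	if (sysctlbyname("kern.smp.maxid", &maxid, &len, NULL, 0) != 0)
		return (1);
	len = sizeof(ncpus);
	if (sysctlbyname("kern.smp.cpus", &ncpus, &len, NULL, 0) != 0)
		return (1);
	len = sizeof(active);
	if (sysctlbyname("kern.smp.active", &active, &len, NULL, 0) != 0)
		return (1);
	printf("max CPU ID %d, %d CPUs online, SMP %s\n",
	    maxid, ncpus, active ? "active" : "inactive");
	return (0);
}
#endif
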
#ifdef SMP
/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
           &forward_signal_enabled, 0,
           "Forwarding of a signal to a process on a different CPU");

/* Variables needed for SMP rendezvous. */
static volatile int smp_rv_ncpus;
static void (*volatile smp_rv_setup_func)(void *arg);
static void (*volatile smp_rv_action_func)(void *arg);
static void (*volatile smp_rv_teardown_func)(void *arg);
static void *volatile smp_rv_func_arg;
static volatile int smp_rv_waiters[4];

/*
 * Shared mutex to restrict busywaits between smp_rendezvous() and
 * smp(_targeted)_tlb_shootdown().  A deadlock occurs if both of these
 * functions trigger at once and cause multiple CPUs to busywait with
 * interrupts disabled.
 */
struct mtx smp_ipi_mtx;

/*
 * Let the MD SMP code initialize mp_maxid very early if it can.
 */
static void
mp_setmaxid(void *dummy)
{

        cpu_mp_setmaxid();

        KASSERT(mp_ncpus >= 1, ("%s: CPU count < 1", __func__));
        KASSERT(mp_ncpus > 1 || mp_maxid == 0,
            ("%s: one CPU but mp_maxid is not zero", __func__));
        KASSERT(mp_maxid >= mp_ncpus - 1,
            ("%s: counters out of sync: max %d, count %d", __func__,
                mp_maxid, mp_ncpus));
}
SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);

/*
 * Call the MD SMP initialization code.
 */
static void
mp_start(void *dummy)
{

        mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);

        /* Probe for MP hardware. */
        if (smp_disabled != 0 || cpu_mp_probe() == 0) {
                mp_ncores = 1;
                mp_ncpus = 1;
                CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
                return;
        }

        cpu_mp_start();
        printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
            mp_ncpus);

        /* Provide a default for most architectures that don't have SMT/HTT. */
        if (mp_ncores < 0)
                mp_ncores = mp_ncpus;

        cpu_mp_announce();
}
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);

void
forward_signal(struct thread *td)
{
        int id;

        /*
         * signotify() has already set TDA_AST and TDA_SIG on td_ast for
         * this thread, so all we need to do is poke it if it is currently
         * executing so that it executes ast().
         */
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        KASSERT(TD_IS_RUNNING(td),
            ("forward_signal: thread is not TDS_RUNNING"));

        CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

        if (!smp_started || cold || KERNEL_PANICKED())
                return;
        if (!forward_signal_enabled)
                return;

        /* No need to IPI ourself. */
        if (td == curthread)
                return;

        id = td->td_oncpu;
        if (id == NOCPU)
                return;
        ipi_cpu(id, IPI_AST);
}
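
/*
 * Editor's sketch of the expected calling pattern, not part of the
 * original file: forward_signal() is invoked from the signal-delivery
 * path once a signal is pending, with the thread lock held.  The wrapper
 * name example_forward is hypothetical.
 */
#if 0	/* illustrative only */
static void
example_forward(struct thread *td)
{

	thread_lock(td);
	if (TD_IS_RUNNING(td) && td != curthread)
		forward_signal(td);	/* IPI the remote CPU to run ast() */
	thread_unlock(td);
}
#endif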

/*
 * When called, the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: not applicable (SMP not yet started)
 *   1: ok
 */
#if defined(__amd64__) || defined(__i386__)
#define X86     1
#else
#define X86     0
#endif
static int
generic_stop_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
        char cpusetbuf[CPUSETBUFSIZ];
#endif
        static volatile u_int stopping_cpu = NOCPU;
        int i;
        volatile cpuset_t *cpus;

        KASSERT(
            type == IPI_STOP || type == IPI_STOP_HARD
#if X86
            || type == IPI_SUSPEND
#endif
            , ("%s: invalid stop type", __func__));

        if (!smp_started)
                return (0);

        CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
            cpusetobj_strprint(cpusetbuf, &map), type);

#if X86
        /*
         * When suspending, ensure there are no IPIs in progress.
         * IPIs that have been issued, but not yet delivered (e.g.
         * not pending on a vCPU when running under virtualization)
         * will be lost, violating FreeBSD's assumption of reliable
         * IPI delivery.
         */
        if (type == IPI_SUSPEND)
                mtx_lock_spin(&smp_ipi_mtx);
#endif

#if X86
        if (!nmi_is_broadcast || nmi_kdb_lock == 0) {
#endif
        if (stopping_cpu != PCPU_GET(cpuid))
                while (atomic_cmpset_int(&stopping_cpu, NOCPU,
                    PCPU_GET(cpuid)) == 0)
                        while (stopping_cpu != NOCPU)
                                cpu_spinwait(); /* spin */

        /* send the stop IPI to all CPUs in map */
        ipi_selected(map, type);
#if X86
        }
#endif

#if X86
        if (type == IPI_SUSPEND)
                cpus = &suspended_cpus;
        else
#endif
                cpus = &stopped_cpus;

        i = 0;
        while (!CPU_SUBSET(cpus, &map)) {
                /* spin */
                cpu_spinwait();
                i++;
                if (i == 100000000) {
                        printf("timeout stopping cpus\n");
                        break;
                }
        }

#if X86
        if (type == IPI_SUSPEND)
                mtx_unlock_spin(&smp_ipi_mtx);
#endif

        stopping_cpu = NOCPU;
        return (1);
}

int
stop_cpus(cpuset_t map)
{

        return (generic_stop_cpus(map, IPI_STOP));
}

int
stop_cpus_hard(cpuset_t map)
{

        return (generic_stop_cpus(map, IPI_STOP_HARD));
}

#if X86
int
suspend_cpus(cpuset_t map)
{

        return (generic_stop_cpus(map, IPI_SUSPEND));
}
#endif

/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: not applicable (SMP not yet started)
 *   1: ok
 */
static int
generic_restart_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
        char cpusetbuf[CPUSETBUFSIZ];
#endif
        volatile cpuset_t *cpus;

#if X86
        KASSERT(type == IPI_STOP || type == IPI_STOP_HARD
            || type == IPI_SUSPEND, ("%s: invalid stop type", __func__));

        if (!smp_started)
                return (0);

        CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));

        if (type == IPI_SUSPEND)
                cpus = &resuming_cpus;
        else
                cpus = &stopped_cpus;

        /* signal other cpus to restart */
        if (type == IPI_SUSPEND)
                CPU_COPY_STORE_REL(&map, &toresume_cpus);
        else
                CPU_COPY_STORE_REL(&map, &started_cpus);

        /*
         * Wake up any CPUs stopped with MWAIT.  From MI code we can't tell if
         * MONITOR/MWAIT is enabled, but the potentially redundant writes are
         * relatively inexpensive.
         */
        if (type == IPI_STOP) {
                struct monitorbuf *mb;
                u_int id;

                CPU_FOREACH(id) {
                        if (!CPU_ISSET(id, &map))
                                continue;

                        mb = &pcpu_find(id)->pc_monitorbuf;
                        atomic_store_int(&mb->stop_state,
                            MONITOR_STOPSTATE_RUNNING);
                }
        }

        if (!nmi_is_broadcast || nmi_kdb_lock == 0) {
                /* wait for each to clear its bit */
                while (CPU_OVERLAP(cpus, &map))
                        cpu_spinwait();
        }
#else /* !X86 */
        KASSERT(type == IPI_STOP || type == IPI_STOP_HARD,
            ("%s: invalid stop type", __func__));

        if (!smp_started)
                return (0);

        CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));

        cpus = &stopped_cpus;

        /* signal other cpus to restart */
        CPU_COPY_STORE_REL(&map, &started_cpus);

        /* wait for each to clear its bit */
        while (CPU_OVERLAP(cpus, &map))
                cpu_spinwait();
#endif
        return (1);
}

int
restart_cpus(cpuset_t map)
{

        return (generic_restart_cpus(map, IPI_STOP));
}

#if X86
int
resume_cpus(cpuset_t map)
{

        return (generic_restart_cpus(map, IPI_SUSPEND));
}
#endif
#undef X86
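
/*
 * Editor's sketch of the stop/restart pairing, not part of the original
 * file.  It assumes the caller runs in a context where curcpu is stable
 * (e.g. a critical section) and that delicate global state is inspected
 * while every other CPU is spinning in its stop handler.  The function
 * name example_stop_restart is hypothetical.
 */
#if 0	/* illustrative only */
static void
example_stop_restart(void)
{
	cpuset_t map;

	map = all_cpus;
	CPU_CLR(curcpu, &map);		/* never ask ourselves to stop */
	if (stop_cpus(map) != 0) {
		/* ... examine or modify global state here ... */
		restart_cpus(map);	/* typically the same set */
	}
}
#endif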

/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{
        struct thread *td;
        void *local_func_arg;
        void (*local_setup_func)(void*);
        void (*local_action_func)(void*);
        void (*local_teardown_func)(void*);
#ifdef INVARIANTS
        int owepreempt;
#endif

        /* Ensure we have up-to-date values. */
        atomic_add_acq_int(&smp_rv_waiters[0], 1);
        while (smp_rv_waiters[0] < smp_rv_ncpus)
                cpu_spinwait();

        /* Fetch rendezvous parameters after acquire barrier. */
        local_func_arg = smp_rv_func_arg;
        local_setup_func = smp_rv_setup_func;
        local_action_func = smp_rv_action_func;
        local_teardown_func = smp_rv_teardown_func;

        /*
         * Use a nested critical section to prevent any preemptions
         * from occurring during a rendezvous action routine.
         * Specifically, if a rendezvous handler is invoked via an IPI
         * and the interrupted thread was in the critical_exit()
         * function after setting td_critnest to 0 but before
         * performing a deferred preemption, this routine can be
         * invoked with td_critnest set to 0 and td_owepreempt true.
         * In that case, a critical_exit() during the rendezvous
         * action would trigger a preemption which is not permitted in
         * a rendezvous action.  To fix this, wrap all of the
         * rendezvous action handlers in a critical section.  We
         * cannot use a regular critical section however as having
         * critical_exit() preempt from this routine would also be
         * problematic (the preemption must not occur before the IPI
         * has been acknowledged via an EOI).  Instead, we
         * intentionally ignore td_owepreempt when leaving the
         * critical section.  This should be harmless because we do
         * not permit rendezvous action routines to schedule threads,
         * and thus td_owepreempt should never transition from 0 to 1
         * during this routine.
         */
        td = curthread;
        td->td_critnest++;
#ifdef INVARIANTS
        owepreempt = td->td_owepreempt;
#endif

        /*
         * If requested, run a setup function before the main action
         * function.  Ensure all CPUs have completed the setup
         * function before moving on to the action function.
         */
        if (local_setup_func != smp_no_rendezvous_barrier) {
                if (local_setup_func != NULL)
                        local_setup_func(local_func_arg);
                atomic_add_int(&smp_rv_waiters[1], 1);
                while (smp_rv_waiters[1] < smp_rv_ncpus)
                        cpu_spinwait();
        }

        if (local_action_func != NULL)
                local_action_func(local_func_arg);

        if (local_teardown_func != smp_no_rendezvous_barrier) {
                /*
                 * Signal that the main action has been completed.  If a
                 * full exit rendezvous is requested, then all CPUs will
                 * wait here until all CPUs have finished the main action.
                 */
                atomic_add_int(&smp_rv_waiters[2], 1);
                while (smp_rv_waiters[2] < smp_rv_ncpus)
                        cpu_spinwait();

                if (local_teardown_func != NULL)
                        local_teardown_func(local_func_arg);
        }

        /*
         * Signal that the rendezvous is fully completed by this CPU.
         * This means that no member of smp_rv_* pseudo-structure will be
         * accessed by this target CPU after this point; in particular,
         * memory pointed by smp_rv_func_arg.
         *
         * The release semantic ensures that all accesses performed by
         * the current CPU are visible when smp_rendezvous_cpus()
         * returns, by synchronizing with the
         * atomic_load_acq_int(&smp_rv_waiters[3]).
         */
        atomic_add_rel_int(&smp_rv_waiters[3], 1);

        td->td_critnest--;
        KASSERT(owepreempt == td->td_owepreempt,
            ("rendezvous action changed td_owepreempt"));
}

void
smp_rendezvous_cpus(cpuset_t map,
        void (* setup_func)(void *),
        void (* action_func)(void *),
        void (* teardown_func)(void *),
        void *arg)
{
        int curcpumap, i, ncpus = 0;

        /* See comments in the !SMP case. */
        if (!smp_started) {
                spinlock_enter();
                if (setup_func != NULL)
                        setup_func(arg);
                if (action_func != NULL)
                        action_func(arg);
                if (teardown_func != NULL)
                        teardown_func(arg);
                spinlock_exit();
                return;
        }

        /*
         * Make sure we come here with interrupts enabled.  Otherwise we
         * livelock if smp_ipi_mtx is owned by a thread which sent us an IPI.
         */
        MPASS(curthread->td_md.md_spinlock_count == 0);

        CPU_FOREACH(i) {
                if (CPU_ISSET(i, &map))
                        ncpus++;
        }
        if (ncpus == 0)
                panic("ncpus is 0 with non-zero map");

        mtx_lock_spin(&smp_ipi_mtx);

        /* Pass rendezvous parameters via global variables. */
        smp_rv_ncpus = ncpus;
        smp_rv_setup_func = setup_func;
        smp_rv_action_func = action_func;
        smp_rv_teardown_func = teardown_func;
        smp_rv_func_arg = arg;
        smp_rv_waiters[1] = 0;
        smp_rv_waiters[2] = 0;
        smp_rv_waiters[3] = 0;
        atomic_store_rel_int(&smp_rv_waiters[0], 0);

        /*
         * Signal other processors, which will enter the IPI with
         * interrupts off.
         */
        curcpumap = CPU_ISSET(curcpu, &map);
        CPU_CLR(curcpu, &map);
        ipi_selected(map, IPI_RENDEZVOUS);

        /* Check if the current CPU is in the map */
        if (curcpumap != 0)
                smp_rendezvous_action();

        /*
         * Ensure that the master CPU waits for all the other
         * CPUs to finish the rendezvous, so that smp_rv_*
         * pseudo-structure and the arg are guaranteed to not
         * be in use.
         *
         * Load acquire synchronizes with the release add in
         * smp_rendezvous_action(), which ensures that our caller sees
         * all memory actions done by the called functions on other
         * CPUs.
         */
        while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
                cpu_spinwait();

        mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_rendezvous(void (* setup_func)(void *),
               void (* action_func)(void *),
               void (* teardown_func)(void *),
               void *arg)
{
        smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg);
}
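
/*
 * Editor's sketch, not part of the original file: a typical rendezvous
 * runs one action on every CPU with no setup or teardown barrier, passing
 * smp_no_rendezvous_barrier to skip those synchronization phases.  The
 * per-CPU counter array and the example_ names are hypothetical.
 */
#if 0	/* illustrative only */
static void
example_clear_counter(void *arg)
{
	u_int *counters = arg;		/* hypothetical per-CPU array */

	counters[curcpu] = 0;		/* runs on every CPU, in IPI context */
}

static void
example_rendezvous(u_int *counters)
{

	smp_rendezvous(smp_no_rendezvous_barrier, example_clear_counter,
	    smp_no_rendezvous_barrier, counters);
}
#endif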

static struct cpu_group group[MAXCPU * MAX_CACHE_LEVELS + 1];

static void
smp_topo_fill(struct cpu_group *cg)
{
        int c;

        for (c = 0; c < cg->cg_children; c++)
                smp_topo_fill(&cg->cg_child[c]);
        cg->cg_first = CPU_FFS(&cg->cg_mask) - 1;
        cg->cg_last = CPU_FLS(&cg->cg_mask) - 1;
}

struct cpu_group *
smp_topo(void)
{
        char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
        struct cpu_group *top;

        /*
         * Check for a fake topology request for debugging purposes.
         */
        switch (smp_topology) {
        case 1:
                /* Dual core with no sharing. */
                top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
                break;
        case 2:
                /* No topology, all cpus are equal. */
                top = smp_topo_none();
                break;
        case 3:
                /* Dual core with shared L2. */
                top = smp_topo_1level(CG_SHARE_L2, 2, 0);
                break;
        case 4:
                /* Quad core, shared L3 among each package, private L2. */
                top = smp_topo_1level(CG_SHARE_L3, 4, 0);
                break;
        case 5:
                /* Quad core, two dual-core parts on each package share L2. */
                top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
                break;
        case 6:
                /* Single-core 2xHTT */
                top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
                break;
        case 7:
                /* Quad core with a shared L3, 8 threads sharing L2. */
                top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
                    CG_FLAG_SMT);
                break;
        default:
                /* Default, ask the system what it wants. */
                top = cpu_topo();
                break;
        }
        /*
         * Verify the returned topology.
         */
        if (top->cg_count != mp_ncpus)
                panic("Built bad topology at %p.  CPU count %d != %d",
                    top, top->cg_count, mp_ncpus);
        if (CPU_CMP(&top->cg_mask, &all_cpus))
                panic("Built bad topology at %p.  CPU mask (%s) != (%s)",
                    top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
                    cpusetobj_strprint(cpusetbuf2, &all_cpus));

        /*
         * Collapse nonsense levels that may be created out of convenience by
         * the MD layers.  They cause extra work in the search functions.
         */
        while (top->cg_children == 1) {
                top = &top->cg_child[0];
                top->cg_parent = NULL;
        }
        smp_topo_fill(top);
        return (top);
}

struct cpu_group *
smp_topo_alloc(u_int count)
{
        static u_int index;
        u_int curr;

        curr = index;
        index += count;
        return (&group[curr]);
}

struct cpu_group *
smp_topo_none(void)
{
        struct cpu_group *top;

        top = &group[0];
        top->cg_parent = NULL;
        top->cg_child = NULL;
        top->cg_mask = all_cpus;
        top->cg_count = mp_ncpus;
        top->cg_children = 0;
        top->cg_level = CG_SHARE_NONE;
        top->cg_flags = 0;

        return (top);
}

static int
smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
    int count, int flags, int start)
{
        char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
        cpuset_t mask;
        int i;

        CPU_ZERO(&mask);
        for (i = 0; i < count; i++, start++)
                CPU_SET(start, &mask);
        child->cg_parent = parent;
        child->cg_child = NULL;
        child->cg_children = 0;
        child->cg_level = share;
        child->cg_count = count;
        child->cg_flags = flags;
        child->cg_mask = mask;
        parent->cg_children++;
        for (; parent != NULL; parent = parent->cg_parent) {
                if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
                        panic("Duplicate children in %p.  mask (%s) child (%s)",
                            parent,
                            cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
                            cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
                CPU_OR(&parent->cg_mask, &parent->cg_mask, &child->cg_mask);
                parent->cg_count += child->cg_count;
        }

        return (start);
}

struct cpu_group *
smp_topo_1level(int share, int count, int flags)
{
        struct cpu_group *child;
        struct cpu_group *top;
        int packages;
        int cpu;
        int i;

        cpu = 0;
        top = &group[0];
        packages = mp_ncpus / count;
        top->cg_child = child = &group[1];
        top->cg_level = CG_SHARE_NONE;
        for (i = 0; i < packages; i++, child++)
                cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
        return (top);
}

struct cpu_group *
smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
    int l1flags)
{
        struct cpu_group *top;
        struct cpu_group *l1g;
        struct cpu_group *l2g;
        int cpu;
        int i;
        int j;

        cpu = 0;
        top = &group[0];
        l2g = &group[1];
        top->cg_child = l2g;
        top->cg_level = CG_SHARE_NONE;
        top->cg_children = mp_ncpus / (l2count * l1count);
        l1g = l2g + top->cg_children;
        for (i = 0; i < top->cg_children; i++, l2g++) {
                l2g->cg_parent = top;
                l2g->cg_child = l1g;
                l2g->cg_level = l2share;
                for (j = 0; j < l2count; j++, l1g++)
                        cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
                            l1flags, cpu);
        }
        return (top);
}

struct cpu_group *
smp_topo_find(struct cpu_group *top, int cpu)
{
        struct cpu_group *cg;
        cpuset_t mask;
        int children;
        int i;

        CPU_SETOF(cpu, &mask);
        cg = top;
        for (;;) {
                if (!CPU_OVERLAP(&cg->cg_mask, &mask))
                        return (NULL);
                if (cg->cg_children == 0)
                        return (cg);
                children = cg->cg_children;
                for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
                        if (CPU_OVERLAP(&cg->cg_mask, &mask))
                                break;
        }
        return (NULL);
}
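
/*
 * Editor's sketch, not part of the original file: building the fake
 * two-level topology that smp_topology == 5 selects, then locating the
 * leaf group for the current CPU.  Calling these helpers outside of
 * scheduler initialization would clobber the static group[] array, so
 * this is for illustration only; example_topo_walk is hypothetical.
 */
#if 0	/* illustrative only */
static void
example_topo_walk(void)
{
	struct cpu_group *top, *leaf;

	top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
	leaf = smp_topo_find(top, curcpu);
	if (leaf != NULL)
		printf("CPU %d shares level %d with %d CPUs\n",
		    curcpu, leaf->cg_level, leaf->cg_count);
}
#endif
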
#else /* !SMP */

void
smp_rendezvous_cpus(cpuset_t map,
        void (*setup_func)(void *),
        void (*action_func)(void *),
        void (*teardown_func)(void *),
        void *arg)
{
        /*
         * In the !SMP case we just need to ensure the same initial conditions
         * as the SMP case.
         */
        spinlock_enter();
        if (setup_func != NULL)
                setup_func(arg);
        if (action_func != NULL)
                action_func(arg);
        if (teardown_func != NULL)
                teardown_func(arg);
        spinlock_exit();
}

void
smp_rendezvous(void (*setup_func)(void *),
               void (*action_func)(void *),
               void (*teardown_func)(void *),
               void *arg)
{

        smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func,
            arg);
}

/*
 * Provide dummy SMP support for UP kernels.  Modules that need to use SMP
 * APIs will still work using this dummy support.
 */
static void
mp_setvariables_for_up(void *dummy)
{
        mp_ncpus = 1;
        mp_ncores = 1;
        mp_maxid = PCPU_GET(cpuid);
        CPU_SETOF(mp_maxid, &all_cpus);
        KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
}
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
    mp_setvariables_for_up, NULL);
#endif /* SMP */

void
smp_no_rendezvous_barrier(void *dummy)
{
#ifdef SMP
        KASSERT(!smp_started,
            ("smp_no_rendezvous_barrier called after SMP started"));
#endif
}

void
smp_rendezvous_cpus_retry(cpuset_t map,
        void (* setup_func)(void *),
        void (* action_func)(void *),
        void (* teardown_func)(void *),
        void (* wait_func)(void *, int),
        struct smp_rendezvous_cpus_retry_arg *arg)
{
        int cpu;

        CPU_COPY(&map, &arg->cpus);

        /*
         * Only one CPU to execute on.
         */
        if (!smp_started) {
                spinlock_enter();
                if (setup_func != NULL)
                        setup_func(arg);
                if (action_func != NULL)
                        action_func(arg);
                if (teardown_func != NULL)
                        teardown_func(arg);
                spinlock_exit();
                return;
        }

        /*
         * Execute an action on all specified CPUs while retrying until they
         * all acknowledge completion.
         */
        for (;;) {
                smp_rendezvous_cpus(
                    arg->cpus,
                    setup_func,
                    action_func,
                    teardown_func,
                    arg);

                if (CPU_EMPTY(&arg->cpus))
                        break;

                CPU_FOREACH(cpu) {
                        if (!CPU_ISSET(cpu, &arg->cpus))
                                continue;
                        wait_func(arg, cpu);
                }
        }
}
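
/*
 * Editor's sketch of the retry protocol, not part of the original file:
 * the action handler calls smp_rendezvous_cpus_done() to acknowledge
 * completion; CPUs that cannot complete leave their bit set in arg->cpus,
 * so the rendezvous is reissued after wait_func() runs for each of them.
 * All example_ names, including the predicate, are hypothetical.
 */
#if 0	/* illustrative only */
static void
example_retry_action(void *arg)
{

	if (example_can_proceed())	/* hypothetical predicate */
		smp_rendezvous_cpus_done(arg);
	/* else: leave our bit set; the caller will retry */
}

static void
example_retry_wait(void *arg __unused, int cpu __unused)
{

	/* e.g. give the lagging CPU time to leave its blocking state */
	cpu_spinwait();
}

static void
example_retry(struct smp_rendezvous_cpus_retry_arg *arg)
{

	smp_rendezvous_cpus_retry(all_cpus, smp_no_rendezvous_barrier,
	    example_retry_action, smp_no_rendezvous_barrier,
	    example_retry_wait, arg);
}
#endif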

void
smp_rendezvous_cpus_done(struct smp_rendezvous_cpus_retry_arg *arg)
{

        CPU_CLR_ATOMIC(curcpu, &arg->cpus);
}

/*
 * If (prio & PDROP) == 0:
 *   Wait for the specified idle threads to switch once.  This ensures that
 *   even preempted threads have cycled through the switch function once,
 *   exiting their codepaths.  This allows us to change global pointers
 *   with no other synchronization.
 * If (prio & PDROP) != 0:
 *   Force the specified CPUs to switch context at least once.
 */
int
quiesce_cpus(cpuset_t map, const char *wmesg, int prio)
{
        struct pcpu *pcpu;
        u_int *gen;
        int error;
        int cpu;

        error = 0;
        if ((prio & PDROP) == 0) {
                gen = malloc(sizeof(u_int) * MAXCPU, M_TEMP, M_WAITOK);
                for (cpu = 0; cpu <= mp_maxid; cpu++) {
                        if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
                                continue;
                        pcpu = pcpu_find(cpu);
                        gen[cpu] = pcpu->pc_idlethread->td_generation;
                }
        }
        for (cpu = 0; cpu <= mp_maxid; cpu++) {
                if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
                        continue;
                pcpu = pcpu_find(cpu);
                thread_lock(curthread);
                sched_bind(curthread, cpu);
                thread_unlock(curthread);
                if ((prio & PDROP) != 0)
                        continue;
                while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
                        error = tsleep(quiesce_cpus, prio & ~PDROP, wmesg, 1);
                        if (error != EWOULDBLOCK)
                                goto out;
                        error = 0;
                }
        }
out:
        thread_lock(curthread);
        sched_unbind(curthread);
        thread_unlock(curthread);
        if ((prio & PDROP) == 0)
                free(gen, M_TEMP);

        return (error);
}

int
quiesce_all_cpus(const char *wmesg, int prio)
{

        return (quiesce_cpus(all_cpus, wmesg, prio));
}
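
/*
 * Editor's sketch of the pattern the comment above describes, not part of
 * the original file: swap a global pointer, wait until every CPU has
 * switched context at least once, then free the old value, which no
 * thread can still be using.  The example_ names are hypothetical.
 */
#if 0	/* illustrative only */
static void
example_replace_global(struct example_state *new)
{
	struct example_state *old;

	old = example_ptr;
	example_ptr = new;		/* readers now see old or new */
	quiesce_all_cpus("exqsc", 0);	/* every CPU has switched once */
	example_free(old);		/* no readers of old remain */
}
#endif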

/*
 * Observe that all CPUs are outside of a critical section.  We are not in
 * one ourselves, so the check for the current CPU is trivially safe.  If
 * the thread observed on a CPU changes to a different one, we know that
 * the original thread left its critical section as well.
 */
void
quiesce_all_critical(void)
{
        struct thread *td, *newtd;
        struct pcpu *pcpu;
        int cpu;

        MPASS(curthread->td_critnest == 0);

        CPU_FOREACH(cpu) {
                pcpu = cpuid_to_pcpu[cpu];
                td = pcpu->pc_curthread;
                for (;;) {
                        if (td->td_critnest == 0)
                                break;
                        cpu_spinwait();
                        newtd = (struct thread *)
                            atomic_load_acq_ptr((void *)&pcpu->pc_curthread);
                        if (td != newtd)
                                break;
                }
        }
}

static void
cpus_fence_seq_cst_issue(void *arg __unused)
{

        atomic_thread_fence_seq_cst();
}

/*
 * Send an IPI forcing a sequentially consistent fence.
 *
 * This allows replacing an explicit fence with a compiler barrier on the
 * fast path: it trades a speedup during normal execution for a significant
 * slowdown when the full barrier is actually needed.
 */
void
cpus_fence_seq_cst(void)
{

#ifdef SMP
        smp_rendezvous(
            smp_no_rendezvous_barrier,
            cpus_fence_seq_cst_issue,
            smp_no_rendezvous_barrier,
            NULL
        );
#else
        cpus_fence_seq_cst_issue(NULL);
#endif
}
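
/*
 * Editor's sketch of the asymmetric-fence pattern, not part of the
 * original file: the hot path pairs a plain store with only a compiler
 * barrier, while the rare path calls cpus_fence_seq_cst() to force the
 * equivalent of a full fence on every CPU.  example_flag is hypothetical,
 * and __compiler_membar() is assumed available (it is on x86; other
 * architectures would use their own compiler barrier).
 */
#if 0	/* illustrative only */
static int example_flag;

static void
example_fast_path(void)
{

	atomic_store_int(&example_flag, 1);
	__compiler_membar();	/* compiler barrier only, no hardware fence */
	/* ... proceed under the assumption recorded in example_flag ... */
}

static void
example_slow_path(void)
{

	cpus_fence_seq_cst();	/* all CPUs execute a full fence */
	if (atomic_load_int(&example_flag) != 0) {
		/* the fast path's store is now guaranteed visible */
	}
}
#endif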

/* Extra care is taken with this sysctl because the data type is volatile */
static int
sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS)
{
        int error, active;

        active = smp_started;
        error = SYSCTL_OUT(req, &active, sizeof(active));
        return (error);
}

#ifdef SMP
void
topo_init_node(struct topo_node *node)
{

        bzero(node, sizeof(*node));
        TAILQ_INIT(&node->children);
}

void
topo_init_root(struct topo_node *root)
{

        topo_init_node(root);
        root->type = TOPO_TYPE_SYSTEM;
}

/*
 * Add a child node with the given hardware ID under the given parent.
 * If a child with the same hardware ID, type, and subtype already exists,
 * return the existing child instead of creating a new node.
 */
struct topo_node *
topo_add_node_by_hwid(struct topo_node *parent, int hwid,
    topo_node_type type, uintptr_t subtype)
{
        struct topo_node *node;

        TAILQ_FOREACH_REVERSE(node, &parent->children,
            topo_children, siblings) {
                if (node->hwid == hwid
                    && node->type == type && node->subtype == subtype) {
                        return (node);
                }
        }

        node = malloc(sizeof(*node), M_TOPO, M_WAITOK);
        topo_init_node(node);
        node->parent = parent;
        node->hwid = hwid;
        node->type = type;
        node->subtype = subtype;
        TAILQ_INSERT_TAIL(&parent->children, node, siblings);
        parent->nchildren++;

        return (node);
}

/*
 * Find a child node with the given ID under the given parent.
 */
struct topo_node *
topo_find_node_by_hwid(struct topo_node *parent, int hwid,
    topo_node_type type, uintptr_t subtype)
{

        struct topo_node *node;

        TAILQ_FOREACH(node, &parent->children, siblings) {
                if (node->hwid == hwid
                    && node->type == type && node->subtype == subtype) {
                        return (node);
                }
        }

        return (NULL);
}

/*
 * Given a node, change the order of its parent's child nodes such
 * that the node becomes the first child while preserving the cyclic
 * order of the children.  In other words, the given node is promoted
 * by rotation.
 */
void
topo_promote_child(struct topo_node *child)
{
        struct topo_node *next;
        struct topo_node *node;
        struct topo_node *parent;

        parent = child->parent;
        next = TAILQ_NEXT(child, siblings);
        TAILQ_REMOVE(&parent->children, child, siblings);
        TAILQ_INSERT_HEAD(&parent->children, child, siblings);

        while (next != NULL) {
                node = next;
                next = TAILQ_NEXT(node, siblings);
                TAILQ_REMOVE(&parent->children, node, siblings);
                TAILQ_INSERT_AFTER(&parent->children, child, node, siblings);
                child = node;
        }
}

/*
 * Iterate to the next node in the depth-first search (traversal) of
 * the topology tree.
 */
struct topo_node *
topo_next_node(struct topo_node *top, struct topo_node *node)
{
        struct topo_node *next;

        if ((next = TAILQ_FIRST(&node->children)) != NULL)
                return (next);

        if ((next = TAILQ_NEXT(node, siblings)) != NULL)
                return (next);

        while (node != top && (node = node->parent) != top)
                if ((next = TAILQ_NEXT(node, siblings)) != NULL)
                        return (next);

        return (NULL);
}

/*
 * Iterate to the next node in the depth-first search of the topology tree,
 * but without descending below the current node.
 */
struct topo_node *
topo_next_nonchild_node(struct topo_node *top, struct topo_node *node)
{
        struct topo_node *next;

        if ((next = TAILQ_NEXT(node, siblings)) != NULL)
                return (next);

        while (node != top && (node = node->parent) != top)
                if ((next = TAILQ_NEXT(node, siblings)) != NULL)
                        return (next);

        return (NULL);
}
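
/*
 * Editor's sketch, not part of the original file: the two iterators above
 * give a pre-order walk of the topology tree.  Counting all logical
 * processors under a root might look like this; example_count_pus is a
 * hypothetical name.
 */
#if 0	/* illustrative only */
static int
example_count_pus(struct topo_node *root)
{
	struct topo_node *node;
	int count;

	count = 0;
	for (node = topo_next_node(root, root); node != NULL;
	    node = topo_next_node(root, node))
		if (node->type == TOPO_TYPE_PU)
			count++;
	return (count);
}
#endif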

/*
 * Assign the given ID to the given topology node that represents a logical
 * processor.
 */
void
topo_set_pu_id(struct topo_node *node, cpuid_t id)
{

        KASSERT(node->type == TOPO_TYPE_PU,
            ("topo_set_pu_id: wrong node type: %u", node->type));
        KASSERT(CPU_EMPTY(&node->cpuset) && node->cpu_count == 0,
            ("topo_set_pu_id: cpuset is not empty"));
        node->id = id;
        CPU_SET(id, &node->cpuset);
        node->cpu_count = 1;
        node->subtype = 1;

        while ((node = node->parent) != NULL) {
                KASSERT(!CPU_ISSET(id, &node->cpuset),
                    ("logical ID %u is already set in node %p", id, node));
                CPU_SET(id, &node->cpuset);
                node->cpu_count++;
        }
}
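
/*
 * Editor's sketch, not part of the original file: how an MD enumerator
 * typically builds the tree, here one package with two single-threaded
 * cores, using hardware IDs as logical CPU IDs for simplicity.  The
 * example_ names are hypothetical.
 */
#if 0	/* illustrative only */
static struct topo_node example_root;

static void
example_build_topo(void)
{
	struct topo_node *pkg, *core, *pu;
	int i;

	topo_init_root(&example_root);
	pkg = topo_add_node_by_hwid(&example_root, 0, TOPO_TYPE_PKG, 0);
	for (i = 0; i < 2; i++) {
		core = topo_add_node_by_hwid(pkg, i, TOPO_TYPE_CORE, 0);
		pu = topo_add_node_by_hwid(core, i, TOPO_TYPE_PU, 0);
		topo_set_pu_id(pu, i);	/* propagates into ancestor cpusets */
	}
}
#endif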

static struct topology_spec {
        topo_node_type  type;
        bool            match_subtype;
        uintptr_t       subtype;
} topology_level_table[TOPO_LEVEL_COUNT] = {
        [TOPO_LEVEL_PKG] = { .type = TOPO_TYPE_PKG, },
        [TOPO_LEVEL_GROUP] = { .type = TOPO_TYPE_GROUP, },
        [TOPO_LEVEL_CACHEGROUP] = {
                .type = TOPO_TYPE_CACHE,
                .match_subtype = true,
                .subtype = CG_SHARE_L3,
        },
        [TOPO_LEVEL_CORE] = { .type = TOPO_TYPE_CORE, },
        [TOPO_LEVEL_THREAD] = { .type = TOPO_TYPE_PU, },
};

static bool
topo_analyze_table(struct topo_node *root, int all, enum topo_level level,
    struct topo_analysis *results)
{
        struct topology_spec *spec;
        struct topo_node *node;
        int count;

        if (level >= TOPO_LEVEL_COUNT)
                return (true);

        spec = &topology_level_table[level];
        count = 0;
        node = topo_next_node(root, root);

        while (node != NULL) {
                if (node->type != spec->type ||
                    (spec->match_subtype && node->subtype != spec->subtype)) {
                        node = topo_next_node(root, node);
                        continue;
                }
                if (!all && CPU_EMPTY(&node->cpuset)) {
                        node = topo_next_nonchild_node(root, node);
                        continue;
                }

                count++;

                if (!topo_analyze_table(node, all, level + 1, results))
                        return (false);

                node = topo_next_nonchild_node(root, node);
        }

        /* Having no explicit subgroups is equivalent to one implicit subgroup. */
        if (count == 0) {
                count = 1;

                if (!topo_analyze_table(root, all, level + 1, results))
                        return (false);
        }

        if (results->entities[level] == -1)
                results->entities[level] = count;
        else if (results->entities[level] != count)
                return (false);

        return (true);
}

/*
 * Check whether the topology is uniform, that is, whether each package has
 * the same number of cores in it and each core has the same number of
 * threads (logical processors) in it.  If so, calculate the number of
 * packages, the number of groups per package, the number of cachegroups
 * per group, and the number of logical processors per cachegroup.  The
 * 'all' parameter indicates whether administratively disabled logical
 * processors are to be included in the analysis.
 */
int
topo_analyze(struct topo_node *topo_root, int all,
    struct topo_analysis *results)
{

        results->entities[TOPO_LEVEL_PKG] = -1;
        results->entities[TOPO_LEVEL_CORE] = -1;
        results->entities[TOPO_LEVEL_THREAD] = -1;
        results->entities[TOPO_LEVEL_GROUP] = -1;
        results->entities[TOPO_LEVEL_CACHEGROUP] = -1;

        if (!topo_analyze_table(topo_root, all, TOPO_LEVEL_PKG, results))
                return (0);

        KASSERT(results->entities[TOPO_LEVEL_PKG] > 0,
                ("bug in topology or analysis"));

        return (1);
}
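
/*
 * Editor's sketch, not part of the original file: a consumer checks for
 * uniformity and then reads the per-level counts out of the results
 * structure.  example_analyze is a hypothetical name.
 */
#if 0	/* illustrative only */
static void
example_analyze(struct topo_node *root)
{
	struct topo_analysis results;

	if (topo_analyze(root, 1, &results))
		printf("%d pkg x %d core x %d thread\n",
		    results.entities[TOPO_LEVEL_PKG],
		    results.entities[TOPO_LEVEL_CORE],
		    results.entities[TOPO_LEVEL_THREAD]);
	else
		printf("topology is not uniform\n");
}
#endif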

#endif /* SMP */