
FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_smp.c


    1 /*-
    2  * Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 
   27 /*
   28  * This module holds the global variables and machine independent functions
   29  * used for the kernel SMP support.
   30  */
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD$");
   34 
   35 #include <sys/param.h>
   36 #include <sys/systm.h>
   37 #include <sys/kernel.h>
   38 #include <sys/ktr.h>
   39 #include <sys/proc.h>
   40 #include <sys/bus.h>
   41 #include <sys/lock.h>
   42 #include <sys/mutex.h>
   43 #include <sys/pcpu.h>
   44 #include <sys/smp.h>
   45 #include <sys/sysctl.h>
   46 
   47 #include <machine/cpu.h>
   48 #include <machine/smp.h>
   49 
   50 #include "opt_sched.h"
   51 
   52 #ifdef SMP
   53 volatile cpuset_t stopped_cpus;
   54 volatile cpuset_t started_cpus;
   55 cpuset_t hlt_cpus_mask;
   56 cpuset_t logical_cpus_mask;
   57 
   58 void (*cpustop_restartfunc)(void);
   59 #endif
   60 /* This is used in modules that need to work in both SMP and UP. */
   61 cpuset_t all_cpus;
   62 
   63 int mp_ncpus;
   64 /* export this for libkvm consumers. */
   65 int mp_maxcpus = MAXCPU;
   66 
   67 volatile int smp_started;
   68 u_int mp_maxid;
   69 
   70 static SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD|CTLFLAG_CAPRD, NULL,
   71     "Kernel SMP");
   72 
   73 SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
   74     "Max CPU ID.");
   75 
   76 SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
   77     0, "Max number of CPUs that the system was compiled for.");
   78 
   79 int smp_active = 0;     /* are the APs allowed to run? */
   80 SYSCTL_INT(_kern_smp, OID_AUTO, active, CTLFLAG_RW, &smp_active, 0,
    81     "Number of Auxiliary Processors (APs) that were successfully started");
   82 
   83 int smp_disabled = 0;   /* has smp been disabled? */
   84 SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
   85     &smp_disabled, 0, "SMP has been disabled from the loader");
   86 TUNABLE_INT("kern.smp.disabled", &smp_disabled);
   87 
   88 int smp_cpus = 1;       /* how many cpu's running */
   89 SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
   90     "Number of CPUs online");
   91 
   92 int smp_topology = 0;   /* Which topology we're using. */
   93 SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RD, &smp_topology, 0,
   94     "Topology override setting; 0 is default provided by hardware.");
   95 TUNABLE_INT("kern.smp.topology", &smp_topology);
   96 
   97 #ifdef SMP
   98 /* Enable forwarding of a signal to a process running on a different CPU */
   99 static int forward_signal_enabled = 1;
  100 SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
  101            &forward_signal_enabled, 0,
  102            "Forwarding of a signal to a process on a different CPU");
  103 
  104 /* Variables needed for SMP rendezvous. */
  105 static volatile int smp_rv_ncpus;
  106 static void (*volatile smp_rv_setup_func)(void *arg);
  107 static void (*volatile smp_rv_action_func)(void *arg);
  108 static void (*volatile smp_rv_teardown_func)(void *arg);
  109 static void *volatile smp_rv_func_arg;
  110 static volatile int smp_rv_waiters[4];
  111 
  112 /* 
  113  * Shared mutex to restrict busywaits between smp_rendezvous() and
  114  * smp(_targeted)_tlb_shootdown().  A deadlock occurs if both of these
  115  * functions trigger at once and cause multiple CPUs to busywait with
  116  * interrupts disabled. 
  117  */
  118 struct mtx smp_ipi_mtx;
  119 
  120 /*
  121  * Let the MD SMP code initialize mp_maxid very early if it can.
  122  */
  123 static void
  124 mp_setmaxid(void *dummy)
  125 {
  126         cpu_mp_setmaxid();
  127 }
  128 SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);
  129 
  130 /*
  131  * Call the MD SMP initialization code.
  132  */
  133 static void
  134 mp_start(void *dummy)
  135 {
  136 
  137         mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);
  138 
  139         /* Probe for MP hardware. */
  140         if (smp_disabled != 0 || cpu_mp_probe() == 0) {
  141                 mp_ncpus = 1;
  142                 CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
  143                 return;
  144         }
  145 
  146         cpu_mp_start();
  147         printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
  148             mp_ncpus);
  149         cpu_mp_announce();
  150 }
  151 SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);
  152 
  153 void
  154 forward_signal(struct thread *td)
  155 {
  156         int id;
  157 
  158         /*
   159          * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHK on
  160          * this thread, so all we need to do is poke it if it is currently
  161          * executing so that it executes ast().
  162          */
  163         THREAD_LOCK_ASSERT(td, MA_OWNED);
  164         KASSERT(TD_IS_RUNNING(td),
  165             ("forward_signal: thread is not TDS_RUNNING"));
  166 
  167         CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);
  168 
  169         if (!smp_started || cold || panicstr)
  170                 return;
  171         if (!forward_signal_enabled)
  172                 return;
  173 
  174         /* No need to IPI ourself. */
  175         if (td == curthread)
  176                 return;
  177 
  178         id = td->td_oncpu;
  179         if (id == NOCPU)
  180                 return;
  181         ipi_cpu(id, IPI_AST);
  182 }
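
/*
 * Illustrative sketch (not part of subr_smp.c): the calling convention the
 * comment above describes.  A caller in the MI signal path holds the thread
 * lock and forwards only when the target thread is currently running.  The
 * helper name below is hypothetical and exists only for illustration.
 */
static void
example_poke_running_thread(struct thread *td)
{

        thread_lock(td);
        if (TD_IS_RUNNING(td))
                forward_signal(td);     /* sends IPI_AST to td->td_oncpu */
        thread_unlock(td);
}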
  183 
  184 /*
   185  * When called, the executing CPU sends an IPI to the CPUs in 'map'
   186  * requesting that they halt execution.
  187  *
  188  * Usually (but not necessarily) called with 'other_cpus' as its arg.
  189  *
  190  *  - Signals all CPUs in map to stop.
  191  *  - Waits for each to stop.
  192  *
  193  * Returns:
  194  *  -1: error
  195  *   0: NA
  196  *   1: ok
  197  *
  198  */
  199 static int
  200 generic_stop_cpus(cpuset_t map, u_int type)
  201 {
  202 #ifdef KTR
  203         char cpusetbuf[CPUSETBUFSIZ];
  204 #endif
  205         static volatile u_int stopping_cpu = NOCPU;
  206         int i;
  207 
  208         KASSERT(
  209 #if defined(__amd64__) || defined(__i386__)
  210             type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
  211 #else
  212             type == IPI_STOP || type == IPI_STOP_HARD,
  213 #endif
  214             ("%s: invalid stop type", __func__));
  215 
  216         if (!smp_started)
  217                 return (0);
  218 
  219         CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
  220             cpusetobj_strprint(cpusetbuf, &map), type);
  221 
  222         if (stopping_cpu != PCPU_GET(cpuid))
  223                 while (atomic_cmpset_int(&stopping_cpu, NOCPU,
  224                     PCPU_GET(cpuid)) == 0)
  225                         while (stopping_cpu != NOCPU)
  226                                 cpu_spinwait(); /* spin */
  227 
  228         /* send the stop IPI to all CPUs in map */
  229         ipi_selected(map, type);
  230 
  231         i = 0;
  232         while (!CPU_SUBSET(&stopped_cpus, &map)) {
  233                 /* spin */
  234                 cpu_spinwait();
  235                 i++;
  236                 if (i == 100000000) {
  237                         printf("timeout stopping cpus\n");
  238                         break;
  239                 }
  240         }
  241 
  242         stopping_cpu = NOCPU;
  243         return (1);
  244 }
  245 
  246 int
  247 stop_cpus(cpuset_t map)
  248 {
  249 
  250         return (generic_stop_cpus(map, IPI_STOP));
  251 }
  252 
  253 int
  254 stop_cpus_hard(cpuset_t map)
  255 {
  256 
  257         return (generic_stop_cpus(map, IPI_STOP_HARD));
  258 }
  259 
  260 #if defined(__amd64__) || defined(__i386__)
  261 int
  262 suspend_cpus(cpuset_t map)
  263 {
  264 
  265         return (generic_stop_cpus(map, IPI_SUSPEND));
  266 }
  267 #endif
  268 
  269 /*
  270  * Called by a CPU to restart stopped CPUs. 
  271  *
  272  * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
  273  *
  274  *  - Signals all CPUs in map to restart.
  275  *  - Waits for each to restart.
  276  *
  277  * Returns:
  278  *  -1: error
  279  *   0: NA
  280  *   1: ok
  281  */
  282 int
  283 restart_cpus(cpuset_t map)
  284 {
  285 #ifdef KTR
  286         char cpusetbuf[CPUSETBUFSIZ];
  287 #endif
  288 
  289         if (!smp_started)
  290                 return 0;
  291 
  292         CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));
  293 
  294         /* signal other cpus to restart */
  295         CPU_COPY_STORE_REL(&map, &started_cpus);
  296 
  297         /* wait for each to clear its bit */
  298         while (CPU_OVERLAP(&stopped_cpus, &map))
  299                 cpu_spinwait();
  300 
  301         return 1;
  302 }
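
/*
 * Illustrative sketch (not part of subr_smp.c): the usual pairing of
 * stop_cpus() and restart_cpus().  The caller builds a map of every CPU
 * except itself, stops them, performs work that must not race with the
 * other CPUs, and then restarts them.  example_quiesce_other_cpus() is a
 * hypothetical helper shown only for illustration.
 */
static void
example_quiesce_other_cpus(void)
{
        cpuset_t map;

        map = all_cpus;
        CPU_CLR(PCPU_GET(cpuid), &map);

        if (stop_cpus(map) == 0)
                return;         /* SMP not started; nothing to stop. */

        /* ... work that must not run concurrently with other CPUs ... */

        restart_cpus(map);
}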
  303 
  304 /*
  305  * All-CPU rendezvous.  CPUs are signalled, all execute the setup function 
  306  * (if specified), rendezvous, execute the action function (if specified),
  307  * rendezvous again, execute the teardown function (if specified), and then
  308  * resume.
  309  *
  310  * Note that the supplied external functions _must_ be reentrant and aware
  311  * that they are running in parallel and in an unknown lock context.
  312  */
  313 void
  314 smp_rendezvous_action(void)
  315 {
  316         struct thread *td;
  317         void *local_func_arg;
  318         void (*local_setup_func)(void*);
  319         void (*local_action_func)(void*);
  320         void (*local_teardown_func)(void*);
  321 #ifdef INVARIANTS
  322         int owepreempt;
  323 #endif
  324 
  325         /* Ensure we have up-to-date values. */
  326         atomic_add_acq_int(&smp_rv_waiters[0], 1);
  327         while (smp_rv_waiters[0] < smp_rv_ncpus)
  328                 cpu_spinwait();
  329 
  330         /* Fetch rendezvous parameters after acquire barrier. */
  331         local_func_arg = smp_rv_func_arg;
  332         local_setup_func = smp_rv_setup_func;
  333         local_action_func = smp_rv_action_func;
  334         local_teardown_func = smp_rv_teardown_func;
  335 
  336         /*
  337          * Use a nested critical section to prevent any preemptions
  338          * from occurring during a rendezvous action routine.
  339          * Specifically, if a rendezvous handler is invoked via an IPI
  340          * and the interrupted thread was in the critical_exit()
  341          * function after setting td_critnest to 0 but before
  342          * performing a deferred preemption, this routine can be
  343          * invoked with td_critnest set to 0 and td_owepreempt true.
  344          * In that case, a critical_exit() during the rendezvous
  345          * action would trigger a preemption which is not permitted in
  346          * a rendezvous action.  To fix this, wrap all of the
  347          * rendezvous action handlers in a critical section.  We
  348          * cannot use a regular critical section however as having
  349          * critical_exit() preempt from this routine would also be
  350          * problematic (the preemption must not occur before the IPI
  351          * has been acknowledged via an EOI).  Instead, we
  352          * intentionally ignore td_owepreempt when leaving the
  353          * critical section.  This should be harmless because we do
  354          * not permit rendezvous action routines to schedule threads,
  355          * and thus td_owepreempt should never transition from 0 to 1
  356          * during this routine.
  357          */
  358         td = curthread;
  359         td->td_critnest++;
  360 #ifdef INVARIANTS
  361         owepreempt = td->td_owepreempt;
  362 #endif
  363         
  364         /*
  365          * If requested, run a setup function before the main action
  366          * function.  Ensure all CPUs have completed the setup
  367          * function before moving on to the action function.
  368          */
  369         if (local_setup_func != smp_no_rendevous_barrier) {
   370                 if (local_setup_func != NULL)
   371                         local_setup_func(local_func_arg);
  372                 atomic_add_int(&smp_rv_waiters[1], 1);
  373                 while (smp_rv_waiters[1] < smp_rv_ncpus)
  374                         cpu_spinwait();
  375         }
  376 
  377         if (local_action_func != NULL)
  378                 local_action_func(local_func_arg);
  379 
  380         if (local_teardown_func != smp_no_rendevous_barrier) {
  381                 /*
  382                  * Signal that the main action has been completed.  If a
  383                  * full exit rendezvous is requested, then all CPUs will
  384                  * wait here until all CPUs have finished the main action.
  385                  */
  386                 atomic_add_int(&smp_rv_waiters[2], 1);
  387                 while (smp_rv_waiters[2] < smp_rv_ncpus)
  388                         cpu_spinwait();
  389 
  390                 if (local_teardown_func != NULL)
  391                         local_teardown_func(local_func_arg);
  392         }
  393 
  394         /*
  395          * Signal that the rendezvous is fully completed by this CPU.
  396          * This means that no member of smp_rv_* pseudo-structure will be
  397          * accessed by this target CPU after this point; in particular,
  398          * memory pointed by smp_rv_func_arg.
  399          */
  400         atomic_add_int(&smp_rv_waiters[3], 1);
  401 
  402         td->td_critnest--;
  403         KASSERT(owepreempt == td->td_owepreempt,
  404             ("rendezvous action changed td_owepreempt"));
  405 }
  406 
  407 void
  408 smp_rendezvous_cpus(cpuset_t map,
  409         void (* setup_func)(void *), 
  410         void (* action_func)(void *),
  411         void (* teardown_func)(void *),
  412         void *arg)
  413 {
  414         int curcpumap, i, ncpus = 0;
  415 
   416         /* See the comments in the !SMP case. */
  417         if (!smp_started) {
  418                 spinlock_enter();
  419                 if (setup_func != NULL)
  420                         setup_func(arg);
  421                 if (action_func != NULL)
  422                         action_func(arg);
  423                 if (teardown_func != NULL)
  424                         teardown_func(arg);
  425                 spinlock_exit();
  426                 return;
  427         }
  428 
  429         CPU_FOREACH(i) {
  430                 if (CPU_ISSET(i, &map))
  431                         ncpus++;
  432         }
  433         if (ncpus == 0)
  434                 panic("ncpus is 0 with non-zero map");
  435 
  436         mtx_lock_spin(&smp_ipi_mtx);
  437 
  438         /* Pass rendezvous parameters via global variables. */
  439         smp_rv_ncpus = ncpus;
  440         smp_rv_setup_func = setup_func;
  441         smp_rv_action_func = action_func;
  442         smp_rv_teardown_func = teardown_func;
  443         smp_rv_func_arg = arg;
  444         smp_rv_waiters[1] = 0;
  445         smp_rv_waiters[2] = 0;
  446         smp_rv_waiters[3] = 0;
  447         atomic_store_rel_int(&smp_rv_waiters[0], 0);
  448 
  449         /*
  450          * Signal other processors, which will enter the IPI with
  451          * interrupts off.
  452          */
  453         curcpumap = CPU_ISSET(curcpu, &map);
  454         CPU_CLR(curcpu, &map);
  455         ipi_selected(map, IPI_RENDEZVOUS);
  456 
  457         /* Check if the current CPU is in the map */
  458         if (curcpumap != 0)
  459                 smp_rendezvous_action();
  460 
  461         /*
  462          * Ensure that the master CPU waits for all the other
  463          * CPUs to finish the rendezvous, so that smp_rv_*
  464          * pseudo-structure and the arg are guaranteed to not
  465          * be in use.
  466          */
  467         while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
  468                 cpu_spinwait();
  469 
  470         mtx_unlock_spin(&smp_ipi_mtx);
  471 }
  472 
  473 void
  474 smp_rendezvous(void (* setup_func)(void *), 
  475                void (* action_func)(void *),
  476                void (* teardown_func)(void *),
  477                void *arg)
  478 {
  479         smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg);
  480 }
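
/*
 * Illustrative sketch (not part of subr_smp.c): running an action on every
 * CPU with smp_rendezvous().  Passing smp_no_rendevous_barrier as the setup
 * and teardown functions skips the corresponding barriers, so each CPU runs
 * the action as soon as it enters the rendezvous.  The counter and function
 * names below are hypothetical.
 */
static volatile u_int example_visited;

static void
example_count_cpu(void *arg __unused)
{

        atomic_add_int(&example_visited, 1);
}

static void
example_run_on_all_cpus(void)
{

        example_visited = 0;
        smp_rendezvous(smp_no_rendevous_barrier, example_count_cpu,
            smp_no_rendevous_barrier, NULL);
        /* Every CPU in all_cpus has now executed example_count_cpu(). */
}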
  481 
  482 static struct cpu_group group[MAXCPU];
  483 
  484 struct cpu_group *
  485 smp_topo(void)
  486 {
  487         char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
  488         struct cpu_group *top;
  489 
  490         /*
  491          * Check for a fake topology request for debugging purposes.
  492          */
  493         switch (smp_topology) {
  494         case 1:
  495                 /* Dual core with no sharing.  */
  496                 top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
  497                 break;
  498         case 2:
  499                 /* No topology, all cpus are equal. */
  500                 top = smp_topo_none();
  501                 break;
  502         case 3:
  503                 /* Dual core with shared L2.  */
  504                 top = smp_topo_1level(CG_SHARE_L2, 2, 0);
  505                 break;
  506         case 4:
  507                 /* quad core, shared l3 among each package, private l2.  */
  508                 top = smp_topo_1level(CG_SHARE_L3, 4, 0);
  509                 break;
  510         case 5:
  511                 /* quad core,  2 dualcore parts on each package share l2.  */
  512                 top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
  513                 break;
  514         case 6:
  515                 /* Single-core 2xHTT */
  516                 top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
  517                 break;
  518         case 7:
  519                 /* quad core with a shared l3, 8 threads sharing L2.  */
  520                 top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
  521                     CG_FLAG_SMT);
  522                 break;
  523         default:
  524                 /* Default, ask the system what it wants. */
  525                 top = cpu_topo();
  526                 break;
  527         }
  528         /*
  529          * Verify the returned topology.
  530          */
  531         if (top->cg_count != mp_ncpus)
  532                 panic("Built bad topology at %p.  CPU count %d != %d",
  533                     top, top->cg_count, mp_ncpus);
  534         if (CPU_CMP(&top->cg_mask, &all_cpus))
  535                 panic("Built bad topology at %p.  CPU mask (%s) != (%s)",
  536                     top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
  537                     cpusetobj_strprint(cpusetbuf2, &all_cpus));
  538         return (top);
  539 }
  540 
  541 struct cpu_group *
  542 smp_topo_none(void)
  543 {
  544         struct cpu_group *top;
  545 
  546         top = &group[0];
  547         top->cg_parent = NULL;
  548         top->cg_child = NULL;
  549         top->cg_mask = all_cpus;
  550         top->cg_count = mp_ncpus;
  551         top->cg_children = 0;
  552         top->cg_level = CG_SHARE_NONE;
  553         top->cg_flags = 0;
  554         
  555         return (top);
  556 }
  557 
  558 static int
  559 smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
  560     int count, int flags, int start)
  561 {
  562         char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
  563         cpuset_t mask;
  564         int i;
  565 
  566         CPU_ZERO(&mask);
  567         for (i = 0; i < count; i++, start++)
  568                 CPU_SET(start, &mask);
  569         child->cg_parent = parent;
  570         child->cg_child = NULL;
  571         child->cg_children = 0;
  572         child->cg_level = share;
  573         child->cg_count = count;
  574         child->cg_flags = flags;
  575         child->cg_mask = mask;
  576         parent->cg_children++;
  577         for (; parent != NULL; parent = parent->cg_parent) {
  578                 if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
  579                         panic("Duplicate children in %p.  mask (%s) child (%s)",
  580                             parent,
  581                             cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
  582                             cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
  583                 CPU_OR(&parent->cg_mask, &child->cg_mask);
  584                 parent->cg_count += child->cg_count;
  585         }
  586 
  587         return (start);
  588 }
  589 
  590 struct cpu_group *
  591 smp_topo_1level(int share, int count, int flags)
  592 {
  593         struct cpu_group *child;
  594         struct cpu_group *top;
  595         int packages;
  596         int cpu;
  597         int i;
  598 
  599         cpu = 0;
  600         top = &group[0];
  601         packages = mp_ncpus / count;
  602         top->cg_child = child = &group[1];
  603         top->cg_level = CG_SHARE_NONE;
  604         for (i = 0; i < packages; i++, child++)
  605                 cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
  606         return (top);
  607 }
  608 
  609 struct cpu_group *
  610 smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
  611     int l1flags)
  612 {
  613         struct cpu_group *top;
  614         struct cpu_group *l1g;
  615         struct cpu_group *l2g;
  616         int cpu;
  617         int i;
  618         int j;
  619 
  620         cpu = 0;
  621         top = &group[0];
  622         l2g = &group[1];
  623         top->cg_child = l2g;
  624         top->cg_level = CG_SHARE_NONE;
  625         top->cg_children = mp_ncpus / (l2count * l1count);
  626         l1g = l2g + top->cg_children;
  627         for (i = 0; i < top->cg_children; i++, l2g++) {
  628                 l2g->cg_parent = top;
  629                 l2g->cg_child = l1g;
  630                 l2g->cg_level = l2share;
  631                 for (j = 0; j < l2count; j++, l1g++)
  632                         cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
  633                             l1flags, cpu);
  634         }
  635         return (top);
  636 }
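
/*
 * Illustrative sketch (not part of subr_smp.c): how machine-dependent code
 * could use smp_topo_2level() to describe packages of two cores where the
 * cores share an L2 cache and each core runs two SMT threads sharing an L1
 * cache.  The function name example_cpu_topo() is hypothetical; the real MD
 * hook is cpu_topo().
 */
static struct cpu_group *
example_cpu_topo(void)
{

        /* l2share, cores per package, l1share, threads per core, flags */
        return (smp_topo_2level(CG_SHARE_L2, 2, CG_SHARE_L1, 2, CG_FLAG_SMT));
}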
  637 
  638 
  639 struct cpu_group *
  640 smp_topo_find(struct cpu_group *top, int cpu)
  641 {
  642         struct cpu_group *cg;
  643         cpuset_t mask;
  644         int children;
  645         int i;
  646 
  647         CPU_SETOF(cpu, &mask);
  648         cg = top;
  649         for (;;) {
  650                 if (!CPU_OVERLAP(&cg->cg_mask, &mask))
  651                         return (NULL);
  652                 if (cg->cg_children == 0)
  653                         return (cg);
  654                 children = cg->cg_children;
  655                 for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
  656                         if (CPU_OVERLAP(&cg->cg_mask, &mask))
  657                                 break;
  658         }
  659         return (NULL);
  660 }
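
/*
 * Illustrative sketch (not part of subr_smp.c): using smp_topo_find() to
 * locate the leaf group for a CPU and then walking up toward the root to
 * inspect the sharing levels above it.  example_print_levels() is a
 * hypothetical helper.
 */
static void
example_print_levels(struct cpu_group *top, int cpu)
{
        struct cpu_group *cg;

        for (cg = smp_topo_find(top, cpu); cg != NULL; cg = cg->cg_parent)
                printf("cpu %d: group %p level %d count %d\n",
                    cpu, cg, cg->cg_level, cg->cg_count);
}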
  661 #else /* !SMP */
  662 
  663 void
  664 smp_rendezvous_cpus(cpuset_t map,
  665         void (*setup_func)(void *), 
  666         void (*action_func)(void *),
  667         void (*teardown_func)(void *),
  668         void *arg)
  669 {
  670         /*
  671          * In the !SMP case we just need to ensure the same initial conditions
  672          * as the SMP case.
  673          */
  674         spinlock_enter();
  675         if (setup_func != NULL)
  676                 setup_func(arg);
  677         if (action_func != NULL)
  678                 action_func(arg);
  679         if (teardown_func != NULL)
  680                 teardown_func(arg);
  681         spinlock_exit();
  682 }
  683 
  684 void
  685 smp_rendezvous(void (*setup_func)(void *), 
  686                void (*action_func)(void *),
  687                void (*teardown_func)(void *),
  688                void *arg)
  689 {
  690 
   691         /* See the comments in the smp_rendezvous_cpus() case. */
  692         spinlock_enter();
  693         if (setup_func != NULL)
  694                 setup_func(arg);
  695         if (action_func != NULL)
  696                 action_func(arg);
  697         if (teardown_func != NULL)
  698                 teardown_func(arg);
  699         spinlock_exit();
  700 }
  701 
  702 /*
  703  * Provide dummy SMP support for UP kernels.  Modules that need to use SMP
  704  * APIs will still work using this dummy support.
  705  */
  706 static void
  707 mp_setvariables_for_up(void *dummy)
  708 {
  709         mp_ncpus = 1;
  710         mp_maxid = PCPU_GET(cpuid);
  711         CPU_SETOF(mp_maxid, &all_cpus);
  712         KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
  713 }
  714 SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
  715     mp_setvariables_for_up, NULL);
  716 #endif /* SMP */
  717 
  718 void
  719 smp_no_rendevous_barrier(void *dummy)
  720 {
  721 #ifdef SMP
  722         KASSERT((!smp_started),("smp_no_rendevous called and smp is started"));
  723 #endif
  724 }

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.