FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_smp.c


    1 /*-
    2  * Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 3. Neither the name of the author nor the names of any co-contributors
   14  *    may be used to endorse or promote products derived from this software
   15  *    without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 /*
   31  * This module holds the global variables and machine independent functions
   32  * used for the kernel SMP support.
   33  */
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD: releng/9.0/sys/kern/subr_smp.c 227889 2011-11-23 15:41:55Z attilio $");
   37 
   38 #include <sys/param.h>
   39 #include <sys/systm.h>
   40 #include <sys/kernel.h>
   41 #include <sys/ktr.h>
   42 #include <sys/proc.h>
   43 #include <sys/bus.h>
   44 #include <sys/lock.h>
   45 #include <sys/mutex.h>
   46 #include <sys/pcpu.h>
   47 #include <sys/smp.h>
   48 #include <sys/sysctl.h>
   49 
   50 #include <machine/cpu.h>
   51 #include <machine/smp.h>
   52 
   53 #include "opt_sched.h"
   54 
   55 #ifdef SMP
   56 volatile cpuset_t stopped_cpus;
   57 volatile cpuset_t started_cpus;
   58 cpuset_t hlt_cpus_mask;
   59 cpuset_t logical_cpus_mask;
   60 
   61 void (*cpustop_restartfunc)(void);
   62 #endif
   63 /* This is used in modules that need to work in both SMP and UP. */
   64 cpuset_t all_cpus;
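/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a module that must work on both SMP and UP kernels can walk the set of
 * present CPUs without any #ifdef SMP, e.g.:
 *
 *	int cpu;
 *
 *	CPU_FOREACH(cpu)
 *		printf("CPU %d is present\n", cpu);
 *
 * CPU_FOREACH() visits exactly the CPU IDs set in all_cpus, so the same
 * loop works when mp_ncpus is 1.
 */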
   65 
   66 int mp_ncpus;
   67 /* export this for libkvm consumers. */
   68 int mp_maxcpus = MAXCPU;
   69 
   70 volatile int smp_started;
   71 u_int mp_maxid;
   72 
   73 SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD|CTLFLAG_CAPRD, NULL, "Kernel SMP");
   74 
   75 SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
   76     "Max CPU ID.");
   77 
   78 SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
   79     0, "Max number of CPUs that the system was compiled for.");
   80 
   81 int smp_active = 0;     /* are the APs allowed to run? */
   82 SYSCTL_INT(_kern_smp, OID_AUTO, active, CTLFLAG_RW, &smp_active, 0,
   83     "Number of Auxillary Processors (APs) that were successfully started");
   84 
   85 int smp_disabled = 0;   /* has smp been disabled? */
   86 SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
   87     &smp_disabled, 0, "SMP has been disabled from the loader");
   88 TUNABLE_INT("kern.smp.disabled", &smp_disabled);
   89 
   90 int smp_cpus = 1;       /* how many cpu's running */
   91 SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
   92     "Number of CPUs online");
   93 
   94 int smp_topology = 0;   /* Which topology we're using. */
   95 SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RD, &smp_topology, 0,
   96     "Topology override setting; 0 is default provided by hardware.");
   97 TUNABLE_INT("kern.smp.topology", &smp_topology);
   98 
   99 #ifdef SMP
  100 /* Enable forwarding of a signal to a process running on a different CPU */
  101 static int forward_signal_enabled = 1;
  102 SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
  103            &forward_signal_enabled, 0,
  104            "Forwarding of a signal to a process on a different CPU");
  105 
  106 /* Variables needed for SMP rendezvous. */
  107 static volatile int smp_rv_ncpus;
  108 static void (*volatile smp_rv_setup_func)(void *arg);
  109 static void (*volatile smp_rv_action_func)(void *arg);
  110 static void (*volatile smp_rv_teardown_func)(void *arg);
  111 static void *volatile smp_rv_func_arg;
  112 static volatile int smp_rv_waiters[4];
  113 
  114 /* 
  115  * Shared mutex to restrict busywaits between smp_rendezvous() and
  116  * smp(_targeted)_tlb_shootdown().  A deadlock occurs if both of these
  117  * functions trigger at once and cause multiple CPUs to busywait with
  118  * interrupts disabled. 
  119  */
  120 struct mtx smp_ipi_mtx;
  121 
  122 /*
  123  * Let the MD SMP code initialize mp_maxid very early if it can.
  124  */
  125 static void
  126 mp_setmaxid(void *dummy)
  127 {
  128         cpu_mp_setmaxid();
  129 }
  130 SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);
  131 
  132 /*
  133  * Call the MD SMP initialization code.
  134  */
  135 static void
  136 mp_start(void *dummy)
  137 {
  138 
  139         mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);
  140 
  141         /* Probe for MP hardware. */
  142         if (smp_disabled != 0 || cpu_mp_probe() == 0) {
  143                 mp_ncpus = 1;
  144                 CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
  145                 return;
  146         }
  147 
  148         cpu_mp_start();
  149         printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
  150             mp_ncpus);
  151         cpu_mp_announce();
  152 }
  153 SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);
  154 
  155 void
  156 forward_signal(struct thread *td)
  157 {
  158         int id;
  159 
  160         /*
   161          * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHK on
  162          * this thread, so all we need to do is poke it if it is currently
  163          * executing so that it executes ast().
  164          */
  165         THREAD_LOCK_ASSERT(td, MA_OWNED);
  166         KASSERT(TD_IS_RUNNING(td),
  167             ("forward_signal: thread is not TDS_RUNNING"));
  168 
  169         CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);
  170 
  171         if (!smp_started || cold || panicstr)
  172                 return;
  173         if (!forward_signal_enabled)
  174                 return;
  175 
  176         /* No need to IPI ourself. */
  177         if (td == curthread)
  178                 return;
  179 
  180         id = td->td_oncpu;
  181         if (id == NOCPU)
  182                 return;
  183         ipi_cpu(id, IPI_AST);
  184 }
  185 
  186 /*
   187  * When called, the executing CPU will send an IPI to all other CPUs
   188  * requesting that they halt execution.
  189  *
  190  * Usually (but not necessarily) called with 'other_cpus' as its arg.
  191  *
  192  *  - Signals all CPUs in map to stop.
  193  *  - Waits for each to stop.
  194  *
  195  * Returns:
  196  *  -1: error
  197  *   0: NA
  198  *   1: ok
  199  *
  200  */
  201 static int
  202 generic_stop_cpus(cpuset_t map, u_int type)
  203 {
  204 #ifdef KTR
  205         char cpusetbuf[CPUSETBUFSIZ];
  206 #endif
  207         static volatile u_int stopping_cpu = NOCPU;
  208         int i;
  209 
  210         KASSERT(
  211 #if defined(__amd64__)
  212             type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
  213 #else
  214             type == IPI_STOP || type == IPI_STOP_HARD,
  215 #endif
  216             ("%s: invalid stop type", __func__));
  217 
  218         if (!smp_started)
  219                 return (0);
  220 
  221         CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
  222             cpusetobj_strprint(cpusetbuf, &map), type);
  223 
  224         if (stopping_cpu != PCPU_GET(cpuid))
  225                 while (atomic_cmpset_int(&stopping_cpu, NOCPU,
  226                     PCPU_GET(cpuid)) == 0)
  227                         while (stopping_cpu != NOCPU)
  228                                 cpu_spinwait(); /* spin */
  229 
  230         /* send the stop IPI to all CPUs in map */
  231         ipi_selected(map, type);
  232 
  233         i = 0;
  234         while (!CPU_SUBSET(&stopped_cpus, &map)) {
  235                 /* spin */
  236                 cpu_spinwait();
  237                 i++;
  238                 if (i == 100000000) {
  239                         printf("timeout stopping cpus\n");
  240                         break;
  241                 }
  242         }
  243 
  244         stopping_cpu = NOCPU;
  245         return (1);
  246 }
  247 
  248 int
  249 stop_cpus(cpuset_t map)
  250 {
  251 
  252         return (generic_stop_cpus(map, IPI_STOP));
  253 }
  254 
  255 int
  256 stop_cpus_hard(cpuset_t map)
  257 {
  258 
  259         return (generic_stop_cpus(map, IPI_STOP_HARD));
  260 }
  261 
  262 #if defined(__amd64__)
  263 int
  264 suspend_cpus(cpuset_t map)
  265 {
  266 
  267         return (generic_stop_cpus(map, IPI_SUSPEND));
  268 }
  269 #endif
  270 
  271 /*
  272  * Called by a CPU to restart stopped CPUs. 
  273  *
  274  * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
  275  *
  276  *  - Signals all CPUs in map to restart.
  277  *  - Waits for each to restart.
  278  *
  279  * Returns:
  280  *  -1: error
  281  *   0: NA
  282  *   1: ok
  283  */
  284 int
  285 restart_cpus(cpuset_t map)
  286 {
  287 #ifdef KTR
  288         char cpusetbuf[CPUSETBUFSIZ];
  289 #endif
  290 
  291         if (!smp_started)
  292                 return 0;
  293 
  294         CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));
  295 
  296         /* signal other cpus to restart */
  297         CPU_COPY_STORE_REL(&map, &started_cpus);
  298 
  299         /* wait for each to clear its bit */
  300         while (CPU_OVERLAP(&stopped_cpus, &map))
  301                 cpu_spinwait();
  302 
  303         return 1;
  304 }
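/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a typical stop/restart pairing from some thread context.  The caller
 * stops every CPU except itself, performs some global operation while the
 * others spin in the stop handler, and then releases them;
 * do_global_work() is a hypothetical placeholder for that operation.
 *
 *	cpuset_t map;
 *
 *	map = all_cpus;
 *	CPU_CLR(PCPU_GET(cpuid), &map);
 *	if (stop_cpus(map) != 0)
 *		do_global_work();
 *	restart_cpus(stopped_cpus);
 */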
  305 
  306 /*
  307  * All-CPU rendezvous.  CPUs are signalled, all execute the setup function 
  308  * (if specified), rendezvous, execute the action function (if specified),
  309  * rendezvous again, execute the teardown function (if specified), and then
  310  * resume.
  311  *
  312  * Note that the supplied external functions _must_ be reentrant and aware
  313  * that they are running in parallel and in an unknown lock context.
  314  */
  315 void
  316 smp_rendezvous_action(void)
  317 {
  318         struct thread *td;
  319         void *local_func_arg;
  320         void (*local_setup_func)(void*);
  321         void (*local_action_func)(void*);
  322         void (*local_teardown_func)(void*);
  323 #ifdef INVARIANTS
  324         int owepreempt;
  325 #endif
  326 
  327         /* Ensure we have up-to-date values. */
  328         atomic_add_acq_int(&smp_rv_waiters[0], 1);
  329         while (smp_rv_waiters[0] < smp_rv_ncpus)
  330                 cpu_spinwait();
  331 
  332         /* Fetch rendezvous parameters after acquire barrier. */
  333         local_func_arg = smp_rv_func_arg;
  334         local_setup_func = smp_rv_setup_func;
  335         local_action_func = smp_rv_action_func;
  336         local_teardown_func = smp_rv_teardown_func;
  337 
  338         /*
  339          * Use a nested critical section to prevent any preemptions
  340          * from occurring during a rendezvous action routine.
  341          * Specifically, if a rendezvous handler is invoked via an IPI
  342          * and the interrupted thread was in the critical_exit()
  343          * function after setting td_critnest to 0 but before
  344          * performing a deferred preemption, this routine can be
  345          * invoked with td_critnest set to 0 and td_owepreempt true.
  346          * In that case, a critical_exit() during the rendezvous
  347          * action would trigger a preemption which is not permitted in
  348          * a rendezvous action.  To fix this, wrap all of the
  349          * rendezvous action handlers in a critical section.  We
  350          * cannot use a regular critical section however as having
  351          * critical_exit() preempt from this routine would also be
  352          * problematic (the preemption must not occur before the IPI
  353          * has been acknowledged via an EOI).  Instead, we
  354          * intentionally ignore td_owepreempt when leaving the
  355          * critical section.  This should be harmless because we do
  356          * not permit rendezvous action routines to schedule threads,
  357          * and thus td_owepreempt should never transition from 0 to 1
  358          * during this routine.
  359          */
  360         td = curthread;
  361         td->td_critnest++;
  362 #ifdef INVARIANTS
  363         owepreempt = td->td_owepreempt;
  364 #endif
  365         
  366         /*
  367          * If requested, run a setup function before the main action
  368          * function.  Ensure all CPUs have completed the setup
  369          * function before moving on to the action function.
  370          */
  371         if (local_setup_func != smp_no_rendevous_barrier) {
   372                 if (local_setup_func != NULL)
   373                         local_setup_func(local_func_arg);
  374                 atomic_add_int(&smp_rv_waiters[1], 1);
  375                 while (smp_rv_waiters[1] < smp_rv_ncpus)
  376                         cpu_spinwait();
  377         }
  378 
  379         if (local_action_func != NULL)
  380                 local_action_func(local_func_arg);
  381 
  382         if (local_teardown_func != smp_no_rendevous_barrier) {
  383                 /*
  384                  * Signal that the main action has been completed.  If a
  385                  * full exit rendezvous is requested, then all CPUs will
  386                  * wait here until all CPUs have finished the main action.
  387                  */
  388                 atomic_add_int(&smp_rv_waiters[2], 1);
  389                 while (smp_rv_waiters[2] < smp_rv_ncpus)
  390                         cpu_spinwait();
  391 
  392                 if (local_teardown_func != NULL)
  393                         local_teardown_func(local_func_arg);
  394         }
  395 
  396         /*
  397          * Signal that the rendezvous is fully completed by this CPU.
   398          * This means that no member of the smp_rv_* pseudo-structure will be
   399          * accessed by this target CPU after this point; in particular, the
   400          * memory pointed to by smp_rv_func_arg.
  401          */
  402         atomic_add_int(&smp_rv_waiters[3], 1);
  403 
  404         td->td_critnest--;
  405         KASSERT(owepreempt == td->td_owepreempt,
  406             ("rendezvous action changed td_owepreempt"));
  407 }
  408 
  409 void
  410 smp_rendezvous_cpus(cpuset_t map,
  411         void (* setup_func)(void *), 
  412         void (* action_func)(void *),
  413         void (* teardown_func)(void *),
  414         void *arg)
  415 {
  416         int curcpumap, i, ncpus = 0;
  417 
   418         /* See the comments in the !SMP case. */
  419         if (!smp_started) {
  420                 spinlock_enter();
  421                 if (setup_func != NULL)
  422                         setup_func(arg);
  423                 if (action_func != NULL)
  424                         action_func(arg);
  425                 if (teardown_func != NULL)
  426                         teardown_func(arg);
  427                 spinlock_exit();
  428                 return;
  429         }
  430 
  431         CPU_FOREACH(i) {
  432                 if (CPU_ISSET(i, &map))
  433                         ncpus++;
  434         }
  435         if (ncpus == 0)
  436                 panic("ncpus is 0 with non-zero map");
  437 
  438         mtx_lock_spin(&smp_ipi_mtx);
  439 
  440         /* Pass rendezvous parameters via global variables. */
  441         smp_rv_ncpus = ncpus;
  442         smp_rv_setup_func = setup_func;
  443         smp_rv_action_func = action_func;
  444         smp_rv_teardown_func = teardown_func;
  445         smp_rv_func_arg = arg;
  446         smp_rv_waiters[1] = 0;
  447         smp_rv_waiters[2] = 0;
  448         smp_rv_waiters[3] = 0;
  449         atomic_store_rel_int(&smp_rv_waiters[0], 0);
  450 
  451         /*
  452          * Signal other processors, which will enter the IPI with
  453          * interrupts off.
  454          */
  455         curcpumap = CPU_ISSET(curcpu, &map);
  456         CPU_CLR(curcpu, &map);
  457         ipi_selected(map, IPI_RENDEZVOUS);
  458 
  459         /* Check if the current CPU is in the map */
  460         if (curcpumap != 0)
  461                 smp_rendezvous_action();
  462 
  463         /*
  464          * Ensure that the master CPU waits for all the other
   465          * CPUs to finish the rendezvous, so that the smp_rv_*
   466          * pseudo-structure and the arg are guaranteed not to
  467          * be in use.
  468          */
  469         while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
  470                 cpu_spinwait();
  471 
  472         mtx_unlock_spin(&smp_ipi_mtx);
  473 }
  474 
  475 void
  476 smp_rendezvous(void (* setup_func)(void *), 
  477                void (* action_func)(void *),
  478                void (* teardown_func)(void *),
  479                void *arg)
  480 {
  481         smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg);
  482 }
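/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * running a short action once on every CPU from some thread context.  The
 * counter and callback names are hypothetical; smp_no_rendevous_barrier is
 * passed as the setup and teardown functions to skip those barriers.
 *
 *	static volatile u_int example_hits;
 *
 *	static void
 *	example_action(void *arg __unused)
 *	{
 *
 *		atomic_add_int(&example_hits, 1);
 *	}
 *
 *	smp_rendezvous(smp_no_rendevous_barrier, example_action,
 *	    smp_no_rendevous_barrier, NULL);
 *
 * When the call returns, every CPU in all_cpus has executed
 * example_action() exactly once with interrupts disabled.
 */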
  483 
  484 static struct cpu_group group[MAXCPU];
  485 
  486 struct cpu_group *
  487 smp_topo(void)
  488 {
  489         char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
  490         struct cpu_group *top;
  491 
  492         /*
  493          * Check for a fake topology request for debugging purposes.
  494          */
  495         switch (smp_topology) {
  496         case 1:
  497                 /* Dual core with no sharing.  */
  498                 top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
  499                 break;
  500         case 2:
  501                 /* No topology, all cpus are equal. */
  502                 top = smp_topo_none();
  503                 break;
  504         case 3:
  505                 /* Dual core with shared L2.  */
  506                 top = smp_topo_1level(CG_SHARE_L2, 2, 0);
  507                 break;
  508         case 4:
  509                 /* quad core, shared l3 among each package, private l2.  */
  510                 top = smp_topo_1level(CG_SHARE_L3, 4, 0);
  511                 break;
  512         case 5:
  513                 /* quad core,  2 dualcore parts on each package share l2.  */
  514                 top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
  515                 break;
  516         case 6:
  517                 /* Single-core 2xHTT */
  518                 top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
  519                 break;
  520         case 7:
  521                 /* quad core with a shared l3, 8 threads sharing L2.  */
  522                 top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
  523                     CG_FLAG_SMT);
  524                 break;
  525         default:
  526                 /* Default, ask the system what it wants. */
  527                 top = cpu_topo();
  528                 break;
  529         }
  530         /*
  531          * Verify the returned topology.
  532          */
  533         if (top->cg_count != mp_ncpus)
  534                 panic("Built bad topology at %p.  CPU count %d != %d",
  535                     top, top->cg_count, mp_ncpus);
  536         if (CPU_CMP(&top->cg_mask, &all_cpus))
  537                 panic("Built bad topology at %p.  CPU mask (%s) != (%s)",
  538                     top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
  539                     cpusetobj_strprint(cpusetbuf2, &all_cpus));
  540         return (top);
  541 }
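/*
 * Editorial note (not part of the original file): the debugging topologies
 * above are selected with the kern.smp.topology tunable, which is read-only
 * at run time and therefore normally set from the loader, e.g. in
 * /boot/loader.conf:
 *
 *	kern.smp.topology="3"
 *
 * which makes the scheduler see pairs of cores sharing an L2 cache
 * regardless of what the hardware reports.
 */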
  542 
  543 struct cpu_group *
  544 smp_topo_none(void)
  545 {
  546         struct cpu_group *top;
  547 
  548         top = &group[0];
  549         top->cg_parent = NULL;
  550         top->cg_child = NULL;
  551         top->cg_mask = all_cpus;
  552         top->cg_count = mp_ncpus;
  553         top->cg_children = 0;
  554         top->cg_level = CG_SHARE_NONE;
  555         top->cg_flags = 0;
  556         
  557         return (top);
  558 }
  559 
  560 static int
  561 smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
  562     int count, int flags, int start)
  563 {
  564         char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
  565         cpuset_t mask;
  566         int i;
  567 
  568         CPU_ZERO(&mask);
  569         for (i = 0; i < count; i++, start++)
  570                 CPU_SET(start, &mask);
  571         child->cg_parent = parent;
  572         child->cg_child = NULL;
  573         child->cg_children = 0;
  574         child->cg_level = share;
  575         child->cg_count = count;
  576         child->cg_flags = flags;
  577         child->cg_mask = mask;
  578         parent->cg_children++;
  579         for (; parent != NULL; parent = parent->cg_parent) {
  580                 if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
  581                         panic("Duplicate children in %p.  mask (%s) child (%s)",
  582                             parent,
  583                             cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
  584                             cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
  585                 CPU_OR(&parent->cg_mask, &child->cg_mask);
  586                 parent->cg_count += child->cg_count;
  587         }
  588 
  589         return (start);
  590 }
  591 
  592 struct cpu_group *
  593 smp_topo_1level(int share, int count, int flags)
  594 {
  595         struct cpu_group *child;
  596         struct cpu_group *top;
  597         int packages;
  598         int cpu;
  599         int i;
  600 
  601         cpu = 0;
  602         top = &group[0];
  603         packages = mp_ncpus / count;
  604         top->cg_child = child = &group[1];
  605         top->cg_level = CG_SHARE_NONE;
  606         for (i = 0; i < packages; i++, child++)
  607                 cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
  608         return (top);
  609 }
  610 
  611 struct cpu_group *
  612 smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
  613     int l1flags)
  614 {
  615         struct cpu_group *top;
  616         struct cpu_group *l1g;
  617         struct cpu_group *l2g;
  618         int cpu;
  619         int i;
  620         int j;
  621 
  622         cpu = 0;
  623         top = &group[0];
  624         l2g = &group[1];
  625         top->cg_child = l2g;
  626         top->cg_level = CG_SHARE_NONE;
  627         top->cg_children = mp_ncpus / (l2count * l1count);
  628         l1g = l2g + top->cg_children;
  629         for (i = 0; i < top->cg_children; i++, l2g++) {
  630                 l2g->cg_parent = top;
  631                 l2g->cg_child = l1g;
  632                 l2g->cg_level = l2share;
  633                 for (j = 0; j < l2count; j++, l1g++)
  634                         cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
  635                             l1flags, cpu);
  636         }
  637         return (top);
  638 }
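/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * on a hypothetical 8-CPU machine, smp_topo_2level(CG_SHARE_L3, 2,
 * CG_SHARE_L2, 2, 0) builds mp_ncpus / (2 * 2) = 2 top-level groups, each
 * holding two leaf groups of two CPUs:
 *
 *	root (CG_SHARE_NONE, CPUs 0-7)
 *	  package 0 (CG_SHARE_L3, CPUs 0-3)
 *	    leaf (CG_SHARE_L2, CPUs 0-1)
 *	    leaf (CG_SHARE_L2, CPUs 2-3)
 *	  package 1 (CG_SHARE_L3, CPUs 4-7)
 *	    leaf (CG_SHARE_L2, CPUs 4-5)
 *	    leaf (CG_SHARE_L2, CPUs 6-7)
 *
 * smp_topo_addleaf() propagates each leaf's mask and count up the parent
 * chain, so the root ends up covering all_cpus.
 */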
  639 
  640 
  641 struct cpu_group *
  642 smp_topo_find(struct cpu_group *top, int cpu)
  643 {
  644         struct cpu_group *cg;
  645         cpuset_t mask;
  646         int children;
  647         int i;
  648 
  649         CPU_SETOF(cpu, &mask);
  650         cg = top;
  651         for (;;) {
  652                 if (!CPU_OVERLAP(&cg->cg_mask, &mask))
  653                         return (NULL);
  654                 if (cg->cg_children == 0)
  655                         return (cg);
  656                 children = cg->cg_children;
  657                 for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
  658                         if (CPU_OVERLAP(&cg->cg_mask, &mask))
  659                                 break;
  660         }
  661         return (NULL);
  662 }
  663 #else /* !SMP */
  664 
  665 void
  666 smp_rendezvous_cpus(cpuset_t map,
  667         void (*setup_func)(void *), 
  668         void (*action_func)(void *),
  669         void (*teardown_func)(void *),
  670         void *arg)
  671 {
  672         /*
  673          * In the !SMP case we just need to ensure the same initial conditions
  674          * as the SMP case.
  675          */
  676         spinlock_enter();
  677         if (setup_func != NULL)
  678                 setup_func(arg);
  679         if (action_func != NULL)
  680                 action_func(arg);
  681         if (teardown_func != NULL)
  682                 teardown_func(arg);
  683         spinlock_exit();
  684 }
  685 
  686 void
  687 smp_rendezvous(void (*setup_func)(void *), 
  688                void (*action_func)(void *),
  689                void (*teardown_func)(void *),
  690                void *arg)
  691 {
  692 
   693         /* See the comments in the smp_rendezvous_cpus() case. */
  694         spinlock_enter();
  695         if (setup_func != NULL)
  696                 setup_func(arg);
  697         if (action_func != NULL)
  698                 action_func(arg);
  699         if (teardown_func != NULL)
  700                 teardown_func(arg);
  701         spinlock_exit();
  702 }
  703 
  704 /*
  705  * Provide dummy SMP support for UP kernels.  Modules that need to use SMP
  706  * APIs will still work using this dummy support.
  707  */
  708 static void
  709 mp_setvariables_for_up(void *dummy)
  710 {
  711         mp_ncpus = 1;
  712         mp_maxid = PCPU_GET(cpuid);
  713         CPU_SETOF(mp_maxid, &all_cpus);
  714         KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
  715 }
  716 SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
  717     mp_setvariables_for_up, NULL);
  718 #endif /* SMP */
  719 
  720 void
  721 smp_no_rendevous_barrier(void *dummy)
  722 {
  723 #ifdef SMP
  724         KASSERT((!smp_started),("smp_no_rendevous called and smp is started"));
  725 #endif
  726 }
