FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_smp.c

/*
 * Copyright (c) 2001
 *      John Baldwin <jhb@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BALDWIN AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL JOHN BALDWIN OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This module holds the global variables and machine independent functions
 * used for the kernel SMP support.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.2/sys/kern/subr_smp.c 123125 2003-12-03 14:55:31Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/smp.h>

#ifdef SMP
volatile u_int stopped_cpus;
volatile u_int started_cpus;

void (*cpustop_restartfunc)(void);
#endif

int mp_ncpus;

volatile int smp_started;
u_int all_cpus;
u_int mp_maxid;

SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD, NULL, "Kernel SMP");

int smp_active = 0;     /* are the APs allowed to run? */
SYSCTL_INT(_kern_smp, OID_AUTO, active, CTLFLAG_RW, &smp_active, 0,
    "Number of Auxiliary Processors (APs) that were successfully started");

int smp_disabled = 0;   /* has smp been disabled? */
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN, &smp_disabled, 0,
    "SMP has been disabled from the loader");
TUNABLE_INT("kern.smp.disabled", &smp_disabled);

int smp_cpus = 1;       /* how many cpu's running */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD, &smp_cpus, 0,
    "Number of CPUs online");

#ifdef SMP
/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
           &forward_signal_enabled, 0,
           "Forwarding of a signal to a process on a different CPU");

/* Enable forwarding of roundrobin to all other cpus */
static int forward_roundrobin_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
           &forward_roundrobin_enabled, 0,
           "Forwarding of roundrobin to all other CPUs");

/* Variables needed for SMP rendezvous. */
static void (*smp_rv_setup_func)(void *arg);
static void (*smp_rv_action_func)(void *arg);
static void (*smp_rv_teardown_func)(void *arg);
static void *smp_rv_func_arg;
static volatile int smp_rv_waiters[2];
static struct mtx smp_rv_mtx;

/*
 * Let the MD SMP code initialize mp_maxid very early if it can.
 */
static void
mp_setmaxid(void *dummy)
{
        cpu_mp_setmaxid();
}
SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL)

/*
 * Call the MD SMP initialization code.
 */
static void
mp_start(void *dummy)
{

        /* Probe for MP hardware. */
        if (smp_disabled != 0 || cpu_mp_probe() == 0) {
                mp_ncpus = 1;
                all_cpus = PCPU_GET(cpumask);
                return;
        }

        mtx_init(&smp_rv_mtx, "smp rendezvous", NULL, MTX_SPIN);
        cpu_mp_start();
        printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
            mp_ncpus);
        cpu_mp_announce();
}
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_SECOND, mp_start, NULL)

void
forward_signal(struct thread *td)
{
        int id;

        /*
         * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHECK on
         * this thread, so all we need to do is poke it if it is currently
         * executing so that it executes ast().
         */
        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT(TD_IS_RUNNING(td),
            ("forward_signal: thread is not TDS_RUNNING"));

        CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

        if (!smp_started || cold || panicstr)
                return;
        if (!forward_signal_enabled)
                return;

        /* No need to IPI ourself. */
        if (td == curthread)
                return;

        id = td->td_oncpu;
        if (id == NOCPU)
                return;
        ipi_selected(1 << id, IPI_AST);
}
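
/*
 * Illustrative sketch, not part of the original file: how a caller such as
 * the signal-delivery path might use forward_signal().  The caller holds
 * sched_lock; forward_signal() itself filters out the !smp_started,
 * curthread, and not-currently-on-a-CPU cases.  The function name below is
 * hypothetical.
 */
static __unused void
example_poke_thread(struct thread *td)
{

        mtx_lock_spin(&sched_lock);
        if (TD_IS_RUNNING(td))
                forward_signal(td);     /* IPI the CPU currently running td */
        mtx_unlock_spin(&sched_lock);
}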

void
forward_roundrobin(void)
{
        struct pcpu *pc;
        struct thread *td;
        u_int id, map;

        mtx_assert(&sched_lock, MA_OWNED);

        CTR0(KTR_SMP, "forward_roundrobin()");

        if (!smp_started || cold || panicstr)
                return;
        if (!forward_roundrobin_enabled)
                return;
        map = 0;
        SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
                td = pc->pc_curthread;
                id = pc->pc_cpumask;
                if (id != PCPU_GET(cpumask) && (id & stopped_cpus) == 0 &&
                    td != pc->pc_idlethread) {
                        td->td_flags |= TDF_NEEDRESCHED;
                        map |= id;
                }
        }
        ipi_selected(map, IPI_AST);
}
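
/*
 * Illustrative sketch, not part of the original file: a periodic scheduler
 * hook (for example a round-robin callout) could call forward_roundrobin()
 * under sched_lock so that every other busy CPU reschedules its current
 * thread on its next AST.  The function name below is hypothetical.
 */
static __unused void
example_roundrobin_tick(void)
{

        mtx_lock_spin(&sched_lock);
        forward_roundrobin();   /* flag remote curthreads and send IPI_AST */
        mtx_unlock_spin(&sched_lock);
}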

/*
 * When called the executing CPU will send an IPI to all other CPUs
 *  requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *            from executing at same time.
 */
int
stop_cpus(u_int map)
{
        int i;

        if (!smp_started)
                return 0;

        CTR1(KTR_SMP, "stop_cpus(%x)", map);

        /* send the stop IPI to all CPUs in map */
        ipi_selected(map, IPI_STOP);

        i = 0;
        while ((atomic_load_acq_int(&stopped_cpus) & map) != map) {
                /* spin */
                i++;
#ifdef DIAGNOSTIC
                if (i == 100000) {
                        printf("timeout stopping cpus\n");
                        break;
                }
#endif
        }

        return 1;
}


/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(u_int map)
{

        if (!smp_started)
                return 0;

        CTR1(KTR_SMP, "restart_cpus(%x)", map);

        /* signal other cpus to restart */
        atomic_store_rel_int(&started_cpus, map);

        /* wait for each to clear its bit */
        while ((atomic_load_acq_int(&stopped_cpus) & map) != 0)
                ;       /* nothing */

        return 1;
}
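
/*
 * Illustrative sketch, not part of the original file: pausing all other CPUs
 * around a critical operation and then resuming them, in the style of the
 * kernel debugger.  The mask is built from the globals defined above; the
 * function name and the do_something() callback are hypothetical.
 */
static __unused void
example_pause_other_cpus(void (*do_something)(void))
{
        u_int other_cpus;

        other_cpus = all_cpus & ~PCPU_GET(cpumask);
        stop_cpus(other_cpus);          /* no-op until SMP has started */
        do_something();                 /* other CPUs (if any) are halted */
        restart_cpus(stopped_cpus);     /* release whichever CPUs stopped */
}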

/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{

        /* setup function */
        if (smp_rv_setup_func != NULL)
                smp_rv_setup_func(smp_rv_func_arg);
        /* spin on entry rendezvous */
        atomic_add_int(&smp_rv_waiters[0], 1);
        while (atomic_load_acq_int(&smp_rv_waiters[0]) < mp_ncpus)
                ;       /* nothing */
        /* action function */
        if (smp_rv_action_func != NULL)
                smp_rv_action_func(smp_rv_func_arg);
        /* spin on exit rendezvous */
        atomic_add_int(&smp_rv_waiters[1], 1);
        while (atomic_load_acq_int(&smp_rv_waiters[1]) < mp_ncpus)
                ;       /* nothing */
        /* teardown function */
        if (smp_rv_teardown_func != NULL)
                smp_rv_teardown_func(smp_rv_func_arg);
}

void
smp_rendezvous(void (* setup_func)(void *),
               void (* action_func)(void *),
               void (* teardown_func)(void *),
               void *arg)
{

        if (!smp_started) {
                if (setup_func != NULL)
                        setup_func(arg);
                if (action_func != NULL)
                        action_func(arg);
                if (teardown_func != NULL)
                        teardown_func(arg);
                return;
        }

        /* obtain rendezvous lock */
        mtx_lock_spin(&smp_rv_mtx);

        /* set static function pointers */
        smp_rv_setup_func = setup_func;
        smp_rv_action_func = action_func;
        smp_rv_teardown_func = teardown_func;
        smp_rv_func_arg = arg;
        smp_rv_waiters[0] = 0;
        smp_rv_waiters[1] = 0;

        /* signal other processors, which will enter the IPI with interrupts off */
        ipi_all_but_self(IPI_RENDEZVOUS);

        /* call executor function */
        smp_rendezvous_action();

        /* release lock */
        mtx_unlock_spin(&smp_rv_mtx);
}
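
/*
 * Illustrative sketch, not part of the original file: using smp_rendezvous()
 * to run an action on every CPU at (roughly) the same time, here bumping a
 * counter from each CPU's own context.  No setup or teardown step is needed,
 * so those arguments are NULL.  The names below are hypothetical.
 */
static volatile u_int example_visited;

static void
example_count_cpu(void *arg __unused)
{

        atomic_add_int(&example_visited, 1);
}

static __unused void
example_rendezvous(void)
{

        example_visited = 0;
        smp_rendezvous(NULL, example_count_cpu, NULL, NULL);
        /* Once SMP is up, every CPU has now incremented example_visited. */
}
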
#else /* !SMP */

/*
 * Provide dummy SMP support for UP kernels.  Modules that need to use SMP
 * APIs will still work using this dummy support.
 */
static void
mp_setvariables_for_up(void *dummy)
{
        mp_ncpus = 1;
        mp_maxid = PCPU_GET(cpuid);
        all_cpus = PCPU_GET(cpumask);
        KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
}
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
    mp_setvariables_for_up, NULL)

void
smp_rendezvous(void (* setup_func)(void *),
               void (* action_func)(void *),
               void (* teardown_func)(void *),
               void *arg)
{

        if (setup_func != NULL)
                setup_func(arg);
        if (action_func != NULL)
                action_func(arg);
        if (teardown_func != NULL)
                teardown_func(arg);
}
#endif /* SMP */

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.