FreeBSD/Linux Kernel Cross Reference
sys/mips/mips/mp_machdep.c


/*-
 * Copyright (c) 2009 Neelkanth Natu
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.1/sys/mips/mips/mp_machdep.c 265606 2014-05-07 20:28:27Z scottl $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/clock.h>
#include <machine/smp.h>
#include <machine/hwfunc.h>
#include <machine/intr_machdep.h>
#include <machine/cache.h>
#include <machine/tlb.h>

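/*
 * stoppcbs[] holds the context saved by each CPU when it handles IPI_STOP.
 * dpcpu points at the dynamic per-CPU data area allocated for the AP that
 * is currently being started; aps_ready and mp_naps coordinate the
 * hand-shake between the BSP and the APs during SMP bring-up.
 */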
struct pcb stoppcbs[MAXCPU];

static void *dpcpu;
static struct mtx ap_boot_mtx;

static volatile int aps_ready;
static volatile int mp_naps;

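/*
 * Post the requested IPI in the target CPU's pending bitmap and poke the
 * CPU through the platform's inter-processor interrupt mechanism.
 */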
static void
ipi_send(struct pcpu *pc, int ipi)
{

        CTR3(KTR_SMP, "%s: cpu=%d, ipi=%x", __func__, pc->pc_cpuid, ipi);

        atomic_set_32(&pc->pc_pending_ipis, ipi);
        platform_ipi_send(pc->pc_cpuid);

        CTR1(KTR_SMP, "%s: sent", __func__);
}

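/* Send an IPI to all CPUs except the calling one. */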
void
ipi_all_but_self(int ipi)
{
        cpuset_t other_cpus;

        other_cpus = all_cpus;
        CPU_CLR(PCPU_GET(cpuid), &other_cpus);
        ipi_selected(other_cpus, ipi);
}

/* Send an IPI to a set of cpus. */
void
ipi_selected(cpuset_t cpus, int ipi)
{
        struct pcpu *pc;

        STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                if (CPU_ISSET(pc->pc_cpuid, &cpus)) {
                        CTR3(KTR_SMP, "%s: pc: %p, ipi: %x\n", __func__, pc,
                            ipi);
                        ipi_send(pc, ipi);
                }
        }
}

/* Send an IPI to a specific CPU. */
void
ipi_cpu(int cpu, u_int ipi)
{

        CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x\n", __func__, cpu, ipi);
        ipi_send(cpuid_to_pcpu[cpu], ipi);
}

/*
 * Handle an IPI sent to this processor.
 */
static int
mips_ipi_handler(void *arg)
{
        u_int   cpu, ipi, ipi_bitmap;
        int     bit;

        cpu = PCPU_GET(cpuid);

        platform_ipi_clear();   /* quiesce the pending ipi interrupt */

        ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis));
        if (ipi_bitmap == 0)
                return (FILTER_STRAY);

        CTR1(KTR_SMP, "smp_handle_ipi(), ipi_bitmap=%x", ipi_bitmap);

        while ((bit = ffs(ipi_bitmap))) {
                bit = bit - 1;
                ipi = 1 << bit;
                ipi_bitmap &= ~ipi;
                switch (ipi) {
                case IPI_RENDEZVOUS:
                        CTR0(KTR_SMP, "IPI_RENDEZVOUS");
                        smp_rendezvous_action();
                        break;

                case IPI_AST:
                        CTR0(KTR_SMP, "IPI_AST");
                        break;

                case IPI_STOP:
                        /*
                         * IPI_STOP_HARD is mapped to IPI_STOP so it is not
                         * necessary to add it in the switch.
                         */
                        CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");

                        savectx(&stoppcbs[cpu]);
                        tlb_save();

                        /* Indicate we are stopped */
                        CPU_SET_ATOMIC(cpu, &stopped_cpus);

                        /* Wait for restart */
                        while (!CPU_ISSET(cpu, &started_cpus))
                                cpu_spinwait();

                        CPU_CLR_ATOMIC(cpu, &started_cpus);
                        CPU_CLR_ATOMIC(cpu, &stopped_cpus);
                        CTR0(KTR_SMP, "IPI_STOP (restart)");
                        break;
                case IPI_PREEMPT:
                        CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
                        sched_preempt(curthread);
                        break;
                case IPI_HARDCLOCK:
                        CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
                        hardclockintr();
                        break;
                default:
                        panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu);
                }
        }

        return (FILTER_HANDLED);
}

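/*
 * Bring up a single AP: allocate its dynamic per-CPU data area, start it
 * through the platform hook and wait up to 5 seconds for it to announce
 * itself by incrementing mp_naps in smp_init_secondary().
 */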
static int
start_ap(int cpuid)
{
        int cpus, ms;

        cpus = mp_naps;
        dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE, M_WAITOK | M_ZERO);

        mips_sync();

        if (platform_start_ap(cpuid) != 0)
                return (-1);                    /* could not start AP */

        for (ms = 0; ms < 5000; ++ms) {
                if (mp_naps > cpus)
                        return (0);             /* success */
                else
                        DELAY(1000);
        }

        return (-2);                            /* timeout initializing AP */
}

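/*
 * Count the CPUs advertised by the platform and derive mp_ncpus and
 * mp_maxid from the highest CPU id found in the mask.
 */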
void
cpu_mp_setmaxid(void)
{
        cpuset_t cpumask;
        int cpu, last;

        platform_cpu_mask(&cpumask);
        mp_ncpus = 0;
        last = 1;
        while ((cpu = CPU_FFS(&cpumask)) != 0) {
                last = cpu;
                cpu--;
                CPU_CLR(cpu, &cpumask);
                mp_ncpus++;
        }
        if (mp_ncpus <= 0)
                mp_ncpus = 1;

        mp_maxid = min(last, MAXCPU) - 1;
}

void
cpu_mp_announce(void)
{
        /* NOTHING */
}

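/* Let the platform code describe the CPU topology. */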
struct cpu_group *
cpu_topo(void)
{
        return (platform_smp_topo());
}

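/* Report whether more than one CPU is present. */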
int
cpu_mp_probe(void)
{

        return (mp_ncpus > 1);
}

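/*
 * Start all APs advertised by the platform, skipping CPU ids beyond
 * MAXCPU and the bootstrap processor itself, and record every usable
 * CPU in all_cpus.
 */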
void
cpu_mp_start(void)
{
        int error, cpuid;
        cpuset_t cpumask;

        mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

        CPU_ZERO(&all_cpus);
        platform_cpu_mask(&cpumask);

        while (!CPU_EMPTY(&cpumask)) {
                cpuid = CPU_FFS(&cpumask) - 1;
                CPU_CLR(cpuid, &cpumask);

                if (cpuid >= MAXCPU) {
                        printf("cpu_mp_start: ignoring AP #%d.\n", cpuid);
                        continue;
                }

                if (cpuid != platform_processor_id()) {
                        if ((error = start_ap(cpuid)) != 0) {
                                printf("AP #%d failed to start: %d\n", cpuid, error);
                                continue;
                        }
                        if (bootverbose)
                                printf("AP #%d started!\n", cpuid);
                }
                CPU_SET(cpuid, &all_cpus);
        }
}

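/*
 * Machine-independent initialization run on each AP once the platform
 * startup code has it executing kernel code: reset the TLB and caches,
 * set up per-CPU data, report success to the BSP via mp_naps, then spin
 * until the BSP sets aps_ready before entering the scheduler.
 */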
void
smp_init_secondary(u_int32_t cpuid)
{

        /* TLB */
        mips_wr_wired(0);
        tlb_invalidate_all();
        mips_wr_wired(VMWIRED_ENTRIES);

        /*
         * We assume that the L1 cache on the APs is identical to the one
         * on the BSP.
         */
        mips_dcache_wbinv_all();
        mips_icache_sync_all();

        mips_sync();

        mips_wr_entryhi(0);

        pcpu_init(PCPU_ADDR(cpuid), cpuid, sizeof(struct pcpu));
        dpcpu_init(dpcpu, cpuid);

        /* The AP has initialized successfully - allow the BSP to proceed */
        ++mp_naps;

        /* Spin until the BSP is ready to release the APs */
        while (!aps_ready)
                ;

        /* Initialize curthread. */
        KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
        PCPU_SET(curthread, PCPU_GET(idlethread));

        mtx_lock_spin(&ap_boot_mtx);

        smp_cpus++;

        CTR1(KTR_SMP, "SMP: AP CPU #%d launched", PCPU_GET(cpuid));

        if (bootverbose)
                printf("SMP: AP CPU #%d launched.\n", PCPU_GET(cpuid));

        if (smp_cpus == mp_ncpus) {
                atomic_store_rel_int(&smp_started, 1);
        }

        mtx_unlock_spin(&ap_boot_mtx);

        while (smp_started == 0)
                ; /* nothing */

        /* Start per-CPU event timers. */
        cpu_initclocks_ap();

        /* enter the scheduler */
        sched_throw(NULL);

        panic("scheduler returned us to %s", __func__);
        /* NOTREACHED */
}

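/*
 * Final step of SMP bring-up on the BSP: install the IPI interrupt
 * handler and release the spinning APs by setting aps_ready, then wait
 * for smp_started to be set once every AP has checked in.
 */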
static void
release_aps(void *dummy __unused)
{
        int ipi_irq;

        if (mp_ncpus == 1)
                return;

        /*
         * IPI handler
         */
        ipi_irq = platform_ipi_intrnum();
        cpu_establish_hardintr("ipi", mips_ipi_handler, NULL, NULL, ipi_irq,
                               INTR_TYPE_MISC | INTR_EXCL, NULL);

        atomic_store_rel_int(&aps_ready, 1);

        while (smp_started == 0)
                ; /* nothing */
}

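/* Release the APs during the SI_SUB_SMP stage of system initialization. */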
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
