FreeBSD/Linux Kernel Cross Reference
sys/powerpc/powerpc/mp_machdep.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2008 Marcel Moolenaar
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  *
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   27  */
   28 
   29 #include <sys/cdefs.h>
   30 __FBSDID("$FreeBSD: releng/12.0/sys/powerpc/powerpc/mp_machdep.c 338143 2018-08-21 16:43:46Z alc $");
   31 
   32 #include <sys/param.h>
   33 #include <sys/systm.h>
   34 #include <sys/kernel.h>
   35 #include <sys/ktr.h>
   36 #include <sys/bus.h>
   37 #include <sys/cpuset.h>
   38 #include <sys/lock.h>
   39 #include <sys/malloc.h>
   40 #include <sys/mutex.h>
   41 #include <sys/pcpu.h>
   42 #include <sys/proc.h>
   43 #include <sys/sched.h>
   44 #include <sys/smp.h>
   45 
   46 #include <vm/vm.h>
   47 #include <vm/vm_param.h>
   48 #include <vm/pmap.h>
   49 #include <vm/vm_map.h>
   50 #include <vm/vm_extern.h>
   51 #include <vm/vm_kern.h>
   52 
   53 #include <machine/bus.h>
   54 #include <machine/cpu.h>
   55 #include <machine/intr_machdep.h>
   56 #include <machine/pcb.h>
   57 #include <machine/platform.h>
   58 #include <machine/md_var.h>
   59 #include <machine/setjmp.h>
   60 #include <machine/smp.h>
   61 
   62 #include "pic_if.h"
   63 
   64 extern struct pcpu __pcpu[MAXCPU];
   65 
   66 volatile static int ap_awake;
   67 volatile static u_int ap_letgo;
   68 volatile static u_quad_t ap_timebase;
   69 static u_int ipi_msg_cnt[32];
   70 static struct mtx ap_boot_mtx;
   71 struct pcb stoppcbs[MAXCPU];
   72 
   73 void
   74 machdep_ap_bootstrap(void)
   75 {
   76 
   77         PCPU_SET(awake, 1);
   78         __asm __volatile("msync; isync");
   79 
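               /*
                * Wait for the BSP to release us.  The "or 27,27,27" and
                * "or 6,6,6" nops below are PowerPC program-priority hints:
                * the first yields this hardware thread's resources to its
                * siblings while it spins on ap_letgo, and the second raises
                * the priority again once the BSP has set the flag.
                */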
   80         while (ap_letgo == 0)
   81                 __asm __volatile("or 27,27,27");
   82         __asm __volatile("or 6,6,6");
   83 
   84         /*
   85          * Set timebase as soon as possible to meet an implicit rendezvous
   86          * from cpu_mp_unleash(), which sets ap_letgo and then immediately
   87          * sets timebase.
   88          *
    89          * Note that this is intrinsically racy and is only relevant on
   90          * platforms that do not support better mechanisms.
   91          */
   92         platform_smp_timebase_sync(ap_timebase, 1);
   93 
   94         /* Give platform code a chance to do anything else necessary */
   95         platform_smp_ap_init();
   96 
   97         /* Initialize decrementer */
   98         decr_ap_init();
   99 
  100         /* Serialize console output and AP count increment */
  101         mtx_lock_spin(&ap_boot_mtx);
  102         ap_awake++;
  103         printf("SMP: AP CPU #%d launched\n", PCPU_GET(cpuid));
  104         mtx_unlock_spin(&ap_boot_mtx);
  105 
  106         while(smp_started == 0)
  107                 ;
  108 
  109         /* Start per-CPU event timers. */
  110         cpu_initclocks_ap();
  111 
  112         /* Announce ourselves awake, and enter the scheduler */
  113         sched_throw(NULL);
  114 }
  115 
  116 void
  117 cpu_mp_setmaxid(void)
  118 {
  119         struct cpuref cpuref;
  120         int error;
  121 
  122         mp_ncpus = 0;
  123         mp_maxid = 0;
  124         error = platform_smp_first_cpu(&cpuref);
  125         while (!error) {
  126                 mp_ncpus++;
  127                 mp_maxid = max(cpuref.cr_cpuid, mp_maxid);
  128                 error = platform_smp_next_cpu(&cpuref);
  129         }
  130         /* Sanity. */
  131         if (mp_ncpus == 0)
  132                 mp_ncpus = 1;
  133 }
  134 
  135 int
  136 cpu_mp_probe(void)
  137 {
  138 
  139         /*
  140          * We're not going to enable SMP if there's only 1 processor.
  141          */
  142         return (mp_ncpus > 1);
  143 }
  144 
  145 void
  146 cpu_mp_start(void)
  147 {
  148         struct cpuref bsp, cpu;
  149         struct pcpu *pc;
  150         int error;
  151 
  152         error = platform_smp_get_bsp(&bsp);
  153         KASSERT(error == 0, ("Don't know BSP"));
  154 
  155         error = platform_smp_first_cpu(&cpu);
  156         while (!error) {
  157                 if (cpu.cr_cpuid >= MAXCPU) {
  158                         printf("SMP: cpu%d: skipped -- ID out of range\n",
  159                             cpu.cr_cpuid);
  160                         goto next;
  161                 }
  162                 if (CPU_ISSET(cpu.cr_cpuid, &all_cpus)) {
  163                         printf("SMP: cpu%d: skipped - duplicate ID\n",
  164                             cpu.cr_cpuid);
  165                         goto next;
  166                 }
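                       /*
                        * APs get a freshly initialized pcpu and a dynamic
                        * per-CPU (dpcpu) area; the BSP's pcpu was set up
                        * early in boot, so it is only tagged as the BSP here.
                        */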
  167                 if (cpu.cr_cpuid != bsp.cr_cpuid) {
  168                         void *dpcpu;
  169 
  170                         pc = &__pcpu[cpu.cr_cpuid];
  171                         dpcpu = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK |
  172                             M_ZERO);
  173                         pcpu_init(pc, cpu.cr_cpuid, sizeof(*pc));
  174                         dpcpu_init(dpcpu, cpu.cr_cpuid);
  175                 } else {
  176                         pc = pcpup;
  177                         pc->pc_cpuid = bsp.cr_cpuid;
  178                         pc->pc_bsp = 1;
  179                 }
  180                 pc->pc_hwref = cpu.cr_hwref;
  181                 CPU_SET(pc->pc_cpuid, &all_cpus);
  182 next:
  183                 error = platform_smp_next_cpu(&cpu);
  184         }
  185 }
  186 
  187 void
  188 cpu_mp_announce(void)
  189 {
  190         struct pcpu *pc;
  191         int i;
  192 
  193         if (!bootverbose)
  194                 return;
  195 
  196         CPU_FOREACH(i) {
  197                 pc = pcpu_find(i);
  198                 if (pc == NULL)
  199                         continue;
  200                 printf("cpu%d: dev=%x", i, (int)pc->pc_hwref);
  201                 if (pc->pc_bsp)
  202                         printf(" (BSP)");
  203                 printf("\n");
  204         }
  205 }
  206 
  207 static void
  208 cpu_mp_unleash(void *dummy)
  209 {
  210         struct pcpu *pc;
  211         int cpus, timeout;
  212         int ret;
  213 
  214         if (mp_ncpus <= 1)
  215                 return;
  216 
  217         mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
  218 
  219         cpus = 0;
  220         smp_cpus = 0;
  221 #ifdef BOOKE
  222         tlb1_ap_prep();
  223 #endif
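               /*
                * Wake each AP through platform code and give it up to two
                * seconds to set pc_awake; any CPU that fails to come up is
                * left in the stopped set.
                */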
  224         STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
  225                 cpus++;
  226                 if (!pc->pc_bsp) {
  227                         if (bootverbose)
  228                                 printf("Waking up CPU %d (dev=%x)\n",
  229                                     pc->pc_cpuid, (int)pc->pc_hwref);
  230 
  231                         ret = platform_smp_start_cpu(pc);
  232                         if (ret == 0) {
  233                                 timeout = 2000; /* wait 2sec for the AP */
  234                                 while (!pc->pc_awake && --timeout > 0)
  235                                         DELAY(1000);
  236                         }
  237                 } else {
  238                         pc->pc_awake = 1;
  239                 }
  240                 if (pc->pc_awake) {
  241                         if (bootverbose)
  242                                 printf("Adding CPU %d, hwref=%jx, awake=%x\n",
  243                                     pc->pc_cpuid, (uintmax_t)pc->pc_hwref,
  244                                     pc->pc_awake);
  245                         smp_cpus++;
  246                 } else
  247                         CPU_SET(pc->pc_cpuid, &stopped_cpus);
  248         }
  249 
  250         ap_awake = 1;
  251 
  252         /* Provide our current DEC and TB values for APs */
  253         ap_timebase = mftb() + 10;
  254         __asm __volatile("msync; isync");
  255         
  256         /* Let APs continue */
  257         atomic_store_rel_int(&ap_letgo, 1);
  258 
  259         platform_smp_timebase_sync(ap_timebase, 0);
  260 
  261         while (ap_awake < smp_cpus)
  262                 ;
  263 
  264         if (smp_cpus != cpus || cpus != mp_ncpus) {
  265                 printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n",
  266                     mp_ncpus, cpus, smp_cpus);
  267         }
  268 
  269         if (smp_cpus > 1)
  270                 atomic_store_rel_int(&smp_started, 1);
  271 
  272         /* Let the APs get into the scheduler */
  273         DELAY(10000);
  274 
  275 }
  276 
  277 SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);
  278 
  279 int
  280 powerpc_ipi_handler(void *arg)
  281 {
  282         u_int cpuid;
  283         uint32_t ipimask;
  284         int msg;
  285 
  286         CTR2(KTR_SMP, "%s: MSR 0x%08x", __func__, mfmsr());
  287 
  288         ipimask = atomic_readandclear_32(&(pcpup->pc_ipimask));
  289         if (ipimask == 0)
  290                 return (FILTER_STRAY);
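               /*
                * Dispatch every pending IPI: ffs() returns the 1-based index
                * of the lowest set bit, so msg walks the mask from lowest to
                * highest message number until the mask is empty.
                */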
  291         while ((msg = ffs(ipimask) - 1) != -1) {
  292                 ipimask &= ~(1u << msg);
  293                 ipi_msg_cnt[msg]++;
  294                 switch (msg) {
  295                 case IPI_AST:
  296                         CTR1(KTR_SMP, "%s: IPI_AST", __func__);
  297                         break;
  298                 case IPI_PREEMPT:
  299                         CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
  300                         sched_preempt(curthread);
  301                         break;
  302                 case IPI_RENDEZVOUS:
  303                         CTR1(KTR_SMP, "%s: IPI_RENDEZVOUS", __func__);
  304                         smp_rendezvous_action();
  305                         break;
  306                 case IPI_STOP:
  307 
  308                         /*
  309                          * IPI_STOP_HARD is mapped to IPI_STOP so it is not
  310                          * necessary to add such case in the switch.
  311                          */
  312                         CTR1(KTR_SMP, "%s: IPI_STOP or IPI_STOP_HARD (stop)",
  313                             __func__);
  314                         cpuid = PCPU_GET(cpuid);
  315                         savectx(&stoppcbs[cpuid]);
  316                         savectx(PCPU_GET(curpcb));
  317                         CPU_SET_ATOMIC(cpuid, &stopped_cpus);
  318                         while (!CPU_ISSET(cpuid, &started_cpus))
  319                                 cpu_spinwait();
  320                         CPU_CLR_ATOMIC(cpuid, &stopped_cpus);
  321                         CPU_CLR_ATOMIC(cpuid, &started_cpus);
  322                         CTR1(KTR_SMP, "%s: IPI_STOP (restart)", __func__);
  323                         break;
  324                 case IPI_HARDCLOCK:
  325                         CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
  326                         hardclockintr();
  327                         break;
  328                 }
  329         }
  330 
  331         return (FILTER_HANDLED);
  332 }
  333 
  334 static void
  335 ipi_send(struct pcpu *pc, int ipi)
  336 {
  337 
  338         CTR4(KTR_SMP, "%s: pc=%p, targetcpu=%d, IPI=%d", __func__,
  339             pc, pc->pc_cpuid, ipi);
  340 
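               /*
                * Post the message in the target CPU's IPI mask, make the
                * store visible with a full sync, and then have the root
                * interrupt controller raise the IPI on that CPU.
                */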
  341         atomic_set_32(&pc->pc_ipimask, (1 << ipi));
  342         powerpc_sync();
  343         PIC_IPI(root_pic, pc->pc_cpuid);
  344 
  345         CTR1(KTR_SMP, "%s: sent", __func__);
  346 }
  347 
  348 /* Send an IPI to a set of cpus. */
  349 void
  350 ipi_selected(cpuset_t cpus, int ipi)
  351 {
  352         struct pcpu *pc;
  353 
  354         STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
  355                 if (CPU_ISSET(pc->pc_cpuid, &cpus))
  356                         ipi_send(pc, ipi);
  357         }
  358 }
  359 
  360 /* Send an IPI to a specific CPU. */
  361 void
  362 ipi_cpu(int cpu, u_int ipi)
  363 {
  364 
  365         ipi_send(cpuid_to_pcpu[cpu], ipi);
  366 }
  367 
  368 /* Send an IPI to all CPUs EXCEPT myself. */
  369 void
  370 ipi_all_but_self(int ipi)
  371 {
  372         struct pcpu *pc;
  373 
  374         STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
  375                 if (pc != pcpup)
  376                         ipi_send(pc, ipi);
  377         }
  378 }
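
The entry points above (ipi_cpu(), ipi_selected(), ipi_all_but_self()) are what
machine-independent kernel code uses to deliver the IPI messages handled by
powerpc_ipi_handler().  The following is a minimal illustrative sketch of a
caller, not part of this file: the function name and CPU numbers are made up,
and the header set is assumed to match the includes used above.

    #include <sys/param.h>
    #include <sys/cpuset.h>
    #include <sys/smp.h>

    #include <machine/smp.h>

    static void
    example_kick_cpus(void)
    {
            cpuset_t set;

            /* Ask CPU 1 to preempt its current thread. */
            ipi_cpu(1, IPI_PREEMPT);

            /* Request AST processing on CPUs 2 and 3. */
            CPU_ZERO(&set);
            CPU_SET(2, &set);
            CPU_SET(3, &set);
            ipi_selected(set, IPI_AST);

            /* Deliver a hardclock tick to every CPU except this one. */
            ipi_all_but_self(IPI_HARDCLOCK);
    }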

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.