The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/powerpc/powerpc/mp_machdep.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2008 Marcel Moolenaar
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  *
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD: releng/8.0/sys/powerpc/powerpc/mp_machdep.c 196198 2009-08-13 17:54:11Z attilio $");
   29 
   30 #include <sys/param.h>
   31 #include <sys/systm.h>
   32 #include <sys/kernel.h>
   33 #include <sys/ktr.h>
   34 #include <sys/bus.h>
   35 #include <sys/pcpu.h>
   36 #include <sys/proc.h>
   37 #include <sys/sched.h>
   38 #include <sys/smp.h>
   39 
   40 #include <vm/vm.h>
   41 #include <vm/vm_param.h>
   42 #include <vm/pmap.h>
   43 #include <vm/vm_map.h>
   44 #include <vm/vm_extern.h>
   45 #include <vm/vm_kern.h>
   46 
   47 #include <machine/bus.h>
   48 #include <machine/cpu.h>
   49 #include <machine/intr_machdep.h>
   50 #include <machine/platform.h>
   51 #include <machine/md_var.h>
   52 #include <machine/smp.h>
   53 
   54 #include "pic_if.h"
   55 
   56 extern struct pcpu __pcpu[MAXCPU];
   57 
   58 volatile static int ap_awake;
   59 volatile static u_int ap_letgo;
   60 volatile static uint32_t ap_decr;
   61 volatile static u_quad_t ap_timebase;
   62 static u_int ipi_msg_cnt[32];
   63 
/*
 * Entry point for an application processor (AP) after low-level startup.
 * Spins until the BSP releases it, synchronizes its timebase/decrementer
 * with the BSP's values, announces itself, and enters the scheduler.
 * Never returns.
 */
void
machdep_ap_bootstrap(void)
{

        /* Record our hardware processor ID and mark ourselves awake. */
        PCPU_SET(pir, mfspr(SPR_PIR));
        PCPU_SET(awake, 1);
        /* Order the stores above before the BSP can observe pc_awake. */
        __asm __volatile("msync; isync");

        /* Busy-wait until the BSP sets ap_letgo in cpu_mp_unleash(). */
        while (ap_letgo == 0)
                ;

        /* Initialize DEC and TB, sync with the BSP values */
        decr_ap_init();
        mttb(ap_timebase);
        __asm __volatile("mtdec %0" :: "r"(ap_decr));

        /* Count ourselves in; the BSP waits for ap_awake == smp_cpus. */
        atomic_add_int(&ap_awake, 1);
        CTR1(KTR_SMP, "SMP: AP CPU%d launched", PCPU_GET(cpuid));

        /* Initialize curthread */
        PCPU_SET(curthread, PCPU_GET(idlethread));
        PCPU_SET(curpcb, curthread->td_pcb);

        /* Let the DEC and external interrupts go */
        mtmsr(mfmsr() | PSL_EE);
        /* Hand this CPU to the scheduler; does not return. */
        sched_throw(NULL);
}
   91 
   92 struct cpu_group *
   93 cpu_topo(void)
   94 {
   95 
   96         return (smp_topo_none());
   97 }
   98 
   99 void
  100 cpu_mp_setmaxid(void)
  101 {
  102         struct cpuref cpuref;
  103         int error;
  104 
  105         mp_ncpus = 0;
  106         error = platform_smp_first_cpu(&cpuref);
  107         while (!error) {
  108                 mp_ncpus++;
  109                 error = platform_smp_next_cpu(&cpuref);
  110         }
  111         /* Sanity. */
  112         if (mp_ncpus == 0)
  113                 mp_ncpus = 1;
  114 
  115         /*
  116          * Set the largest cpuid we're going to use. This is necessary
  117          * for VM initialization.
  118          */
  119         mp_maxid = min(mp_ncpus, MAXCPU) - 1;
  120 }
  121 
  122 int
  123 cpu_mp_probe(void)
  124 {
  125 
  126         /*
  127          * We're not going to enable SMP if there's only 1 processor.
  128          */
  129         return (mp_ncpus > 1);
  130 }
  131 
/*
 * Enumerate all CPUs reported by the platform layer and set up a
 * struct pcpu for each usable one.  CPUs with out-of-range or
 * duplicate IDs are skipped with a console message.  The BSP's pcpu
 * already exists (pcpup); APs get theirs from the static __pcpu array
 * plus a freshly allocated dynamic per-CPU data area.
 */
void
cpu_mp_start(void)
{
        struct cpuref bsp, cpu;
        struct pcpu *pc;
        int error;

        /* This platform code assumes the BSP is always cpuid 0. */
        error = platform_smp_get_bsp(&bsp);
        KASSERT(error == 0, ("Don't know BSP"));
        KASSERT(bsp.cr_cpuid == 0, ("%s: cpuid != 0", __func__));

        error = platform_smp_first_cpu(&cpu);
        while (!error) {
                /* Reject IDs that don't fit in the static __pcpu array. */
                if (cpu.cr_cpuid >= MAXCPU) {
                        printf("SMP: cpu%d: skipped -- ID out of range\n",
                            cpu.cr_cpuid);
                        goto next;
                }
                /* all_cpus doubles as a seen-ID bitmask for dup detection. */
                if (all_cpus & (1 << cpu.cr_cpuid)) {
                        printf("SMP: cpu%d: skipped - duplicate ID\n",
                            cpu.cr_cpuid);
                        goto next;
                }
                if (cpu.cr_cpuid != bsp.cr_cpuid) {
                        void *dpcpu;

                        /*
                         * AP: initialize its pcpu and allocate its dynamic
                         * per-CPU data region.
                         * NOTE(review): kmem_alloc() return is not checked;
                         * dpcpu_init() would be handed NULL/0 on failure --
                         * presumably acceptable this early in boot, but
                         * worth confirming.
                         */
                        pc = &__pcpu[cpu.cr_cpuid];
                        dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
                        pcpu_init(pc, cpu.cr_cpuid, sizeof(*pc));
                        dpcpu_init(dpcpu, cpu.cr_cpuid);
                } else {
                        /* BSP: its pcpu is already live; just tag it. */
                        pc = pcpup;
                        pc->pc_cpuid = bsp.cr_cpuid;
                        pc->pc_bsp = 1;
                }
                pc->pc_cpumask = 1 << pc->pc_cpuid;
                pc->pc_hwref = cpu.cr_hwref;
                all_cpus |= pc->pc_cpumask;
next:
                error = platform_smp_next_cpu(&cpu);
        }
}
  174 
  175 void
  176 cpu_mp_announce(void)
  177 {
  178         struct pcpu *pc;
  179         int i;
  180 
  181         for (i = 0; i <= mp_maxid; i++) {
  182                 pc = pcpu_find(i);
  183                 if (pc == NULL)
  184                         continue;
  185                 printf("cpu%d: dev=%x", i, pc->pc_hwref);
  186                 if (pc->pc_bsp)
  187                         printf(" (BSP)");
  188                 printf("\n");
  189         }
  190 }
  191 
/*
 * SYSINIT hook run on the BSP once the kernel is far enough along to
 * start the APs: wake each AP via the platform layer, wait for it to
 * come up, publish the BSP's decrementer/timebase values, and release
 * the APs spinning in machdep_ap_bootstrap().
 */
static void
cpu_mp_unleash(void *dummy)
{
        struct pcpu *pc;
        int cpus, timeout;

        /* Nothing to do on a uniprocessor configuration. */
        if (mp_ncpus <= 1)
                return;

        cpus = 0;
        smp_cpus = 0;
        SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
                cpus++;
                pc->pc_other_cpus = all_cpus & ~pc->pc_cpumask;
                if (!pc->pc_bsp) {
                        if (bootverbose)
                                printf("Waking up CPU %d (dev=%x)\n",
                                    pc->pc_cpuid, pc->pc_hwref);

                        platform_smp_start_cpu(pc);
                        
                        timeout = 2000; /* wait 2sec for the AP */
                        while (!pc->pc_awake && --timeout > 0)
                                DELAY(1000);

                } else {
                        /* The BSP just records its own PIR and is awake. */
                        PCPU_SET(pir, mfspr(SPR_PIR));
                        pc->pc_awake = 1;
                }
                if (pc->pc_awake) {
                        if (bootverbose)
                                printf("Adding CPU %d, pir=%x, awake=%x\n",
                                    pc->pc_cpuid, pc->pc_pir, pc->pc_awake);
                        smp_cpus++;
                } else
                        /* AP never came up; mark it stopped. */
                        stopped_cpus |= (1 << pc->pc_cpuid);
        }

        /* Count the BSP itself toward the rendezvous below. */
        ap_awake = 1;

        /* Provide our current DEC and TB values for APs */
        __asm __volatile("mfdec %0" : "=r"(ap_decr));
        /* A small offset so the APs load a TB value still in the future. */
        ap_timebase = mftb() + 10;
        /* Make ap_decr/ap_timebase visible before releasing the APs. */
        __asm __volatile("msync; isync");
        
        /* Let APs continue */
        atomic_store_rel_int(&ap_letgo, 1);

        /* BSP loads the same timebase value the APs will, for sync. */
        mttb(ap_timebase);

        /* Spin until every awake AP has bumped ap_awake. */
        while (ap_awake < smp_cpus)
                ;

        if (smp_cpus != cpus || cpus != mp_ncpus) {
                printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n",
                    mp_ncpus, cpus, smp_cpus);
        }

        smp_active = 1;
        smp_started = 1;
}
  253 
/* Release the APs once the scheduler/SMP subsystems are initialized. */
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);
  255 
/*
 * Interrupt filter for inter-processor interrupts.  Atomically fetches
 * and clears this CPU's pending-IPI mask, then dispatches each pending
 * message in ascending bit order.  Returns FILTER_STRAY if nothing was
 * pending, FILTER_HANDLED otherwise.
 */
int
powerpc_ipi_handler(void *arg)
{
        cpumask_t self;
        uint32_t ipimask;
        int msg;

        CTR2(KTR_SMP, "%s: MSR 0x%08x", __func__, mfmsr());

        /* Grab and clear all pending messages in one atomic operation. */
        ipimask = atomic_readandclear_32(&(pcpup->pc_ipimask));
        if (ipimask == 0)
                return (FILTER_STRAY);
        /* ffs() is 1-based; msg becomes the lowest set bit index. */
        while ((msg = ffs(ipimask) - 1) != -1) {
                ipimask &= ~(1u << msg);
                ipi_msg_cnt[msg]++;
                switch (msg) {
                case IPI_AST:
                        /* AST is posted as a side effect of taking the IPI. */
                        CTR1(KTR_SMP, "%s: IPI_AST", __func__);
                        break;
                case IPI_PREEMPT:
                        CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
                        sched_preempt(curthread);
                        break;
                case IPI_RENDEZVOUS:
                        CTR1(KTR_SMP, "%s: IPI_RENDEZVOUS", __func__);
                        smp_rendezvous_action();
                        break;
                case IPI_STOP:

                        /*
                         * IPI_STOP_HARD is mapped to IPI_STOP so it is not
                         * necessary to add such case in the switch.
                         * Save context, advertise ourselves as stopped,
                         * then spin until another CPU sets our bit in
                         * started_cpus to restart us.
                         */
                        CTR1(KTR_SMP, "%s: IPI_STOP or IPI_STOP_HARD (stop)",
                            __func__);
                        self = PCPU_GET(cpumask);
                        savectx(PCPU_GET(curpcb));
                        atomic_set_int(&stopped_cpus, self);
                        while ((started_cpus & self) == 0)
                                cpu_spinwait();
                        atomic_clear_int(&started_cpus, self);
                        atomic_clear_int(&stopped_cpus, self);
                        CTR1(KTR_SMP, "%s: IPI_STOP (restart)", __func__);
                        break;
                }
        }

        return (FILTER_HANDLED);
}
  305 
  306 static void
  307 ipi_send(struct pcpu *pc, int ipi)
  308 {
  309 
  310         CTR4(KTR_SMP, "%s: pc=%p, targetcpu=%d, IPI=%d", __func__,
  311             pc, pc->pc_cpuid, ipi);
  312 
  313         atomic_set_32(&pc->pc_ipimask, (1 << ipi));
  314         PIC_IPI(pic, pc->pc_cpuid);
  315 
  316         CTR1(KTR_SMP, "%s: sent", __func__);
  317 }
  318 
  319 /* Send an IPI to a set of cpus. */
  320 void
  321 ipi_selected(cpumask_t cpus, int ipi)
  322 {
  323         struct pcpu *pc;
  324 
  325         SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
  326                 if (cpus & pc->pc_cpumask)
  327                         ipi_send(pc, ipi);
  328         }
  329 }
  330 
  331 /* Send an IPI to all CPUs EXCEPT myself. */
  332 void
  333 ipi_all_but_self(int ipi)
  334 {
  335         struct pcpu *pc;
  336 
  337         SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
  338                 if (pc != pcpup)
  339                         ipi_send(pc, ipi);
  340         }
  341 }

Cache object: 7643f8230177300a501ed230a4db85c6


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.