FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/mp_machdep.c
/*-
 * Copyright (c) 2011 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/ktr.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/smp.h>
#include <machine/pcb.h>
#include <machine/pte.h>
#include <machine/physmem.h>
#include <machine/intr.h>
#include <machine/vmparam.h>
#ifdef VFP
#include <machine/vfp.h>
#endif
#ifdef CPU_MV_PJ4B
#include <arm/mv/mvwin.h>
#include <dev/fdt/fdt_common.h>
#endif

#include "opt_smp.h"

extern struct pcpu __pcpu[];
/* Used to hold the APs until we are ready to release them. */
struct mtx ap_boot_mtx;
struct pcb stoppcbs[MAXCPU];

/* Number of application processors (APs). */
volatile int mp_naps;

/* Set to 1 once we're ready to let the APs out of the pen. */
volatile int aps_ready = 0;

static int ipi_handler(void *arg);
void set_stackptrs(int cpu);

/* Temporary variables for init_secondary(). */
void *dpcpu[MAXCPU - 1];

/* Determine whether we are running on an MP machine. */
int
cpu_mp_probe(void)
{
        CPU_SETOF(0, &all_cpus);

        return (platform_mp_probe());
}

/* Check that the APs started by the platform-specific code have come up. */
static int
check_ap(void)
{
        uint32_t ms;

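        /* Poll for up to two seconds (2000 x 1 ms) for all APs to check in. */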
        for (ms = 0; ms < 2000; ++ms) {
                if ((mp_naps + 1) == mp_ncpus)
                        return (0);             /* success */
                else
                        DELAY(1000);
        }

        return (-2);
}

extern unsigned char _end[];

/* Initialize and fire up non-boot processors. */
void
cpu_mp_start(void)
{
        int error, i;

        mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

        /* Reserve memory for application processors. */
        for (i = 0; i < (mp_ncpus - 1); i++)
                dpcpu[i] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
                    M_WAITOK | M_ZERO);

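        /*
         * Write back and invalidate the caches so that the APs, which
         * start with caches and MMU disabled, see current memory contents.
         */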
        cpu_idcache_wbinv_all();
        cpu_l2cache_wbinv_all();
        cpu_idcache_wbinv_all();

        /* Initialize boot code and start up processors. */
        platform_mp_start_ap();

        /* Check if the APs started properly. */
        error = check_ap();
        if (error)
                printf("WARNING: Some APs failed to start\n");
        else
                for (i = 1; i < mp_ncpus; i++)
                        CPU_SET(i, &all_cpus);
}

/* Introduce the rest of the cores to the world. */
void
cpu_mp_announce(void)
{

}

extern vm_paddr_t pmap_pa;

void
init_secondary(int cpu)
{
        struct pcpu *pc;
        uint32_t loop_counter;
        int start = 0, end = 0;

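        /* Set up this CPU, switch to the kernel page tables, and flush the TLB. */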
        cpu_setup(NULL);
        setttb(pmap_pa);
        cpu_tlb_flushID();

        pc = &__pcpu[cpu];

        /*
         * pcpu_init() updates the queue, so it must not be executed in
         * parallel on several cores.
         */
        while (mp_naps < (cpu - 1))
                ;

        pcpu_init(pc, cpu, sizeof(struct pcpu));
        dpcpu_init(dpcpu[cpu - 1], cpu);

        /* Provide stack pointers for other processor modes. */
        set_stackptrs(cpu);

        /* Signal our startup to the BSP. */
        atomic_add_rel_32(&mp_naps, 1);

        /* Spin until the BSP releases the APs. */
        while (!aps_ready)
                ;

        /* Initialize curthread. */
        KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
        pc->pc_curthread = pc->pc_idlethread;
        pc->pc_curpcb = pc->pc_idlethread->td_pcb;
        set_curthread(pc->pc_idlethread);
#ifdef VFP
        pc->pc_cpu = cpu;

        vfp_init();
#endif

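        /*
         * Count this AP in under the boot spin lock; the last AP to
         * arrive marks SMP startup as complete.
         */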
        mtx_lock_spin(&ap_boot_mtx);

        atomic_add_rel_32(&smp_cpus, 1);

        if (smp_cpus == mp_ncpus) {
                /* Enable IPIs, TLB shootdowns, freezes, etc. */
                atomic_store_rel_int(&smp_started, 1);
        }

        mtx_unlock_spin(&ap_boot_mtx);

        /* Determine the IPI IRQ range for this platform. */
#ifdef IPI_IRQ_START
        start = IPI_IRQ_START;
#ifdef IPI_IRQ_END
        end = IPI_IRQ_END;
#else
        end = IPI_IRQ_START;
#endif
#endif

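        /* Unmask the IPI IRQs on this core and enable interrupts. */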
        for (int i = start; i <= end; i++)
                arm_unmask_irq(i);
        enable_interrupts(PSR_I);

        loop_counter = 0;
        while (smp_started == 0) {
                DELAY(100);
                loop_counter++;
                if (loop_counter == 1000)
                        CTR0(KTR_SMP, "AP still waiting for smp_started");
        }

        /* Start per-CPU event timers. */
        cpu_initclocks_ap();

        CTR0(KTR_SMP, "go into scheduler");
        platform_mp_init_secondary();

        /* Enter the scheduler. */
        sched_throw(NULL);

        panic("scheduler returned us to %s", __func__);
        /* NOTREACHED */
}

static int
ipi_handler(void *arg)
{
        u_int   cpu, ipi;

        cpu = PCPU_GET(cpuid);

        ipi = pic_ipi_get((int)arg);

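        /* A value of 0x3ff (the spurious interrupt ID) means no IPI is pending. */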
        while (ipi != 0x3ff) {
                switch (ipi) {
                case IPI_RENDEZVOUS:
                        CTR0(KTR_SMP, "IPI_RENDEZVOUS");
                        smp_rendezvous_action();
                        break;

                case IPI_AST:
                        CTR0(KTR_SMP, "IPI_AST");
                        break;

                case IPI_STOP:
                        /*
                         * IPI_STOP_HARD is mapped to IPI_STOP so it is not
                         * necessary to add it in the switch.
                         */
                        CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");

                        savectx(&stoppcbs[cpu]);

                        /*
                         * CPUs are stopped when entering the debugger and at
                         * system shutdown, both events which can precede a
                         * panic dump.  For the dump to be correct, all caches
                         * must be flushed and invalidated, but on ARM there's
                         * no way to broadcast a wbinv_all to other cores.
                         * Instead, we have each core do the local wbinv_all as
                         * part of stopping the core.  The core requesting the
                         * stop will do the l2 cache flush after all other cores
                         * have done their l1 flushes and stopped.
                         */
                        cpu_idcache_wbinv_all();

                        /* Indicate we are stopped. */
                        CPU_SET_ATOMIC(cpu, &stopped_cpus);

                        /* Wait for restart. */
                        while (!CPU_ISSET(cpu, &started_cpus))
                                cpu_spinwait();

                        CPU_CLR_ATOMIC(cpu, &started_cpus);
                        CPU_CLR_ATOMIC(cpu, &stopped_cpus);
                        CTR0(KTR_SMP, "IPI_STOP (restart)");
                        break;
                case IPI_PREEMPT:
                        CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
                        sched_preempt(curthread);
                        break;
                case IPI_HARDCLOCK:
                        CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
                        hardclockintr();
                        break;
                case IPI_TLB:
                        CTR1(KTR_SMP, "%s: IPI_TLB", __func__);
                        cpufuncs.cf_tlb_flushID();
                        break;
                default:
                        panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu);
                }

                pic_ipi_clear(ipi);
                ipi = pic_ipi_get(-1);
        }

        return (FILTER_HANDLED);
}

static void
release_aps(void *dummy __unused)
{
        uint32_t loop_counter;
        int start = 0, end = 0;

        if (mp_ncpus == 1)
                return;

        /* Determine the IPI IRQ range for this platform. */
#ifdef IPI_IRQ_START
        start = IPI_IRQ_START;
#ifdef IPI_IRQ_END
        end = IPI_IRQ_END;
#else
        end = IPI_IRQ_START;
#endif
#endif

        for (int i = start; i <= end; i++) {
                /*
                 * Install the IPI handler.  The IRQ number is passed as
                 * the handler argument; it must not be 0, because the
                 * interrupt code hands the trap frame pointer to a
                 * handler whose argument is 0.
                 */
                arm_setup_irqhandler("ipi", ipi_handler, NULL, (void *)i, i,
                    INTR_TYPE_MISC | INTR_EXCL, NULL);

                /* Enable the IPI. */
                arm_unmask_irq(i);
        }
        atomic_store_rel_int(&aps_ready, 1);

        printf("Release APs\n");

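        /* Wait up to two seconds (2000 x 1 ms) for the APs to come up. */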
        for (loop_counter = 0; loop_counter < 2000; loop_counter++) {
                if (smp_started)
                        return;
                DELAY(1000);
        }
        printf("APs not started\n");
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

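/* Report a flat topology: a single level in which all CPUs share an L2 cache. */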
struct cpu_group *
cpu_topo(void)
{

        return (smp_topo_1level(CG_SHARE_L2, mp_ncpus, 0));
}

void
cpu_mp_setmaxid(void)
{

        platform_mp_setmaxid();
}

/* IPI sending routines. */
void
ipi_all_but_self(u_int ipi)
{
        cpuset_t other_cpus;

        other_cpus = all_cpus;
        CPU_CLR(PCPU_GET(cpuid), &other_cpus);
        CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
        platform_ipi_send(other_cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
        cpuset_t cpus;

        CPU_ZERO(&cpus);
        CPU_SET(cpu, &cpus);

        CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
        platform_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

        CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
        platform_ipi_send(cpus, ipi);
}

void
tlb_broadcast(int ipi)
{

        if (smp_started)
                ipi_all_but_self(ipi);
}
