The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/mips/mips/mp_machdep.c

Version: -  FREEBSD  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-2  -  FREEBSD-11-1  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-4  -  FREEBSD-10-3  -  FREEBSD-10-2  -  FREEBSD-10-1  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-3  -  FREEBSD-9-2  -  FREEBSD-9-1  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-4  -  FREEBSD-8-3  -  FREEBSD-8-2  -  FREEBSD-8-1  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-4  -  FREEBSD-7-3  -  FREEBSD-7-2  -  FREEBSD-7-1  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-4  -  FREEBSD-6-3  -  FREEBSD-6-2  -  FREEBSD-6-1  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-5  -  FREEBSD-5-4  -  FREEBSD-5-3  -  FREEBSD-5-2  -  FREEBSD-5-1  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  linux-2.6  -  linux-2.4.22  -  MK83  -  MK84  -  PLAN9  -  DFBSD  -  NETBSD  -  NETBSD5  -  NETBSD4  -  NETBSD3  -  NETBSD20  -  OPENBSD  -  xnu-517  -  xnu-792  -  xnu-792.6.70  -  xnu-1228  -  xnu-1456.1.26  -  xnu-1699.24.8  -  xnu-2050.18.24  -  OPENSOLARIS  -  minix-3-1-1 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2009 Neelkanth Natu
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD: stable/8/sys/mips/mips/mp_machdep.c 218810 2011-02-18 16:29:38Z ken $");
   29 
   30 #include <sys/param.h>
   31 #include <sys/systm.h>
   32 #include <sys/ktr.h>
   33 #include <sys/proc.h>
   34 #include <sys/lock.h>
   35 #include <sys/mutex.h>
   36 #include <sys/kernel.h>
   37 #include <sys/pcpu.h>
   38 #include <sys/smp.h>
   39 #include <sys/sched.h>
   40 #include <sys/bus.h>
   41 
   42 #include <vm/vm.h>
   43 #include <vm/pmap.h>
   44 #include <vm/vm_extern.h>
   45 #include <vm/vm_kern.h>
   46 
   47 #include <machine/clock.h>
   48 #include <machine/smp.h>
   49 #include <machine/hwfunc.h>
   50 #include <machine/intr_machdep.h>
   51 #include <machine/cache.h>
   52 #include <machine/tlb.h>
   53 
/* Saved context of each stopped cpu, indexed by cpuid (see IPI_STOP). */
struct pcb stoppcbs[MAXCPU];

/* Dynamic per-cpu area handed from start_ap() to the AP being booted. */
static void *dpcpu;
/* Serializes the final stage of AP startup in smp_init_secondary(). */
static struct mtx ap_boot_mtx;

/* Set by release_aps() to let the spinning APs enter the scheduler. */
static volatile int aps_ready;
/* Count of APs that have checked in; bumped in smp_init_secondary(). */
static volatile int mp_naps;
   61 
   62 static void
   63 ipi_send(struct pcpu *pc, int ipi)
   64 {
   65 
   66         CTR3(KTR_SMP, "%s: cpu=%d, ipi=%x", __func__, pc->pc_cpuid, ipi);
   67 
   68         atomic_set_32(&pc->pc_pending_ipis, ipi);
   69         platform_ipi_send(pc->pc_cpuid);
   70 
   71         CTR1(KTR_SMP, "%s: sent", __func__);
   72 }
   73 
   74 void
   75 ipi_all_but_self(u_int ipi)
   76 {
   77 
   78         ipi_selected(PCPU_GET(other_cpus), ipi);
   79 }
   80 
   81 /* Send an IPI to a set of cpus. */
   82 void
   83 ipi_selected(cpumask_t cpus, int ipi)
   84 {
   85         struct pcpu *pc;
   86 
   87         CTR3(KTR_SMP, "%s: cpus: %x, ipi: %x\n", __func__, cpus, ipi);
   88 
   89         SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
   90                 if ((cpus & pc->pc_cpumask) != 0)
   91                         ipi_send(pc, ipi);
   92         }
   93 }
   94 
   95 /* Send an IPI to a specific CPU. */
   96 void
   97 ipi_cpu(int cpu, u_int ipi)
   98 {
   99 
  100         CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x\n", __func__, cpu, ipi);
  101         ipi_send(cpuid_to_pcpu[cpu], ipi);
  102 }
  103 
/*
 * Interrupt filter that handles an IPI sent to this processor.
 *
 * Drains the cpu's pending-IPI bitmap (populated by ipi_send()) and
 * dispatches each requested action.  Returns FILTER_STRAY if no IPI
 * was actually pending, FILTER_HANDLED otherwise.
 */
static int
mips_ipi_handler(void *arg)
{
	int cpu;
	cpumask_t cpumask;
	u_int	ipi, ipi_bitmap;
	int	bit;

	cpu = PCPU_GET(cpuid);
	cpumask = PCPU_GET(cpumask);

	platform_ipi_clear();	/* quiesce the pending ipi interrupt */

	/* Atomically claim every IPI posted so far; later ones re-interrupt. */
	ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis));
	if (ipi_bitmap == 0)
		return (FILTER_STRAY);

	CTR1(KTR_SMP, "smp_handle_ipi(), ipi_bitmap=%x", ipi_bitmap);

	/* Dispatch the claimed IPIs, lowest set bit first. */
	while ((bit = ffs(ipi_bitmap))) {
		bit = bit - 1;
		ipi = 1 << bit;
		ipi_bitmap &= ~ipi;
		switch (ipi) {
		case IPI_RENDEZVOUS:
			CTR0(KTR_SMP, "IPI_RENDEZVOUS");
			smp_rendezvous_action();
			break;

		case IPI_AST:
			/* Deliberately empty; taking the interrupt suffices. */
			CTR0(KTR_SMP, "IPI_AST");
			break;

		case IPI_STOP:
			/*
			 * IPI_STOP_HARD is mapped to IPI_STOP so it is not
			 * necessary to add it in the switch.
			 */
			CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");

			/* Preserve this cpu's context while it is stopped. */
			savectx(&stoppcbs[cpu]);
			tlb_save();

			/* Indicate we are stopped */
			atomic_set_int(&stopped_cpus, cpumask);

			/* Wait for restart */
			while ((started_cpus & cpumask) == 0)
				cpu_spinwait();

			atomic_clear_int(&started_cpus, cpumask);
			atomic_clear_int(&stopped_cpus, cpumask);
			CTR0(KTR_SMP, "IPI_STOP (restart)");
			break;
		case IPI_PREEMPT:
			CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
			sched_preempt(curthread);
			break;
#if 0
		case IPI_HARDCLOCK:
			CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
			hardclockintr();
			break;
#endif
		default:
			panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu);
		}
	}

	return (FILTER_HANDLED);
}
  178 
  179 static int
  180 start_ap(int cpuid)
  181 {
  182         int cpus, ms;
  183 
  184         cpus = mp_naps;
  185         dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
  186 
  187         mips_sync();
  188 
  189         if (platform_start_ap(cpuid) != 0)
  190                 return (-1);                    /* could not start AP */
  191 
  192         for (ms = 0; ms < 5000; ++ms) {
  193                 if (mp_naps > cpus)
  194                         return (0);             /* success */
  195                 else
  196                         DELAY(1000);
  197         }
  198 
  199         return (-2);                            /* timeout initializing AP */
  200 }
  201 
  202 void
  203 cpu_mp_setmaxid(void)
  204 {
  205 
  206         mp_ncpus = platform_num_processors();
  207         if (mp_ncpus <= 0)
  208                 mp_ncpus = 1;
  209 
  210         mp_maxid = min(mp_ncpus, MAXCPU) - 1;
  211 }
  212 
/*
 * Hook for printing port-specific SMP details at boot; this port has
 * nothing to report beyond the generic announcement.
 */
void
cpu_mp_announce(void)
{
	/* NOTHING */
}
  218 
  219 struct cpu_group *
  220 cpu_topo(void)
  221 {
  222         return (platform_smp_topo());
  223 }
  224 
  225 int
  226 cpu_mp_probe(void)
  227 {
  228 
  229         return (mp_ncpus > 1);
  230 }
  231 
/*
 * BSP entry point: start every application processor.
 *
 * APs that cannot be represented (id >= MAXCPU) or that fail to start
 * are skipped; each successfully started AP is added to all_cpus.
 */
void
cpu_mp_start(void)
{
	int error, cpuid;

	/* Must exist before any AP reaches smp_init_secondary(). */
	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	all_cpus = 1;		/* BSP */
	for (cpuid = 1; cpuid < platform_num_processors(); ++cpuid) {
		if (cpuid >= MAXCPU) {
			printf("cpu_mp_start: ignoring AP #%d.\n", cpuid);
			continue;
		}

		if ((error = start_ap(cpuid)) != 0) {
			printf("AP #%d failed to start: %d\n", cpuid, error);
			continue;
		}

		if (bootverbose)
			printf("AP #%d started!\n", cpuid);

		all_cpus |= 1 << cpuid;
	}

	/* Everything that came up, minus ourselves, is an 'other' cpu. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
}
  259 
/*
 * C entry point for a freshly started application processor; runs on
 * the AP itself.  Initializes the cpu, checks in with the BSP, waits
 * to be released, and finally enters the scheduler.  Never returns.
 */
void
smp_init_secondary(u_int32_t cpuid)
{
	/* TLB: drop any stale entries, then re-reserve the wired slots. */
	mips_wr_wired(0);
	tlb_invalidate_all();
	mips_wr_wired(VMWIRED_ENTRIES);

	/*
	 * We assume that the L1 cache on the APs is identical to the one
	 * on the BSP.
	 */
	mips_dcache_wbinv_all();
	mips_icache_sync_all();

	mips_sync();

	mips_wr_entryhi(0);

	/* Set up this cpu's static and dynamic per-cpu data. */
	pcpu_init(PCPU_ADDR(cpuid), cpuid, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpuid);

	/* The AP has initialized successfully - allow the BSP to proceed */
	++mp_naps;

	/* Spin until the BSP is ready to release the APs */
	while (!aps_ready)
		;

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	/* Serialize the shared-counter updates below across APs. */
	mtx_lock_spin(&ap_boot_mtx);

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d launched", PCPU_GET(cpuid));

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		printf("SMP: AP CPU #%d launched.\n", PCPU_GET(cpuid));

	/* The last AP to check in marks the whole system as started. */
	if (smp_cpus == mp_ncpus) {
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* Hold everyone here until all cpus have come up. */
	while (smp_started == 0)
		; /* nothing */

#if 0
	/* Start per-CPU event timers. */
	cpu_initclocks_ap();
#endif

	/* enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}
  326 
/*
 * SYSINIT hook run at SI_SUB_SMP: install the IPI interrupt handler
 * and release the APs spinning in smp_init_secondary().
 */
static void
release_aps(void *dummy __unused)
{
	int ipi_irq;

	/* Nothing to release on a uniprocessor system. */
	if (mp_ncpus == 1)
		return;

	/*
	 * IPI handler
	 */
	ipi_irq = platform_ipi_intrnum();
	cpu_establish_hardintr("ipi", mips_ipi_handler, NULL, NULL, ipi_irq,
			       INTR_TYPE_MISC | INTR_EXCL | INTR_FAST, NULL);

	/* Release the APs spinning on aps_ready. */
	atomic_store_rel_int(&aps_ready, 1);

	/* Wait until the last AP flips smp_started. */
	while (smp_started == 0)
		; /* nothing */
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

Cache object: f36f8003c11bdf7f5f1c3b6b201df997


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.