FreeBSD/Linux Kernel Cross Reference
sys/sparc64/sparc64/mp_machdep.c


    1 /*-
    2  * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
    3  *
    4  * Redistribution and use in source and binary forms, with or without
    5  * modification, are permitted provided that the following conditions
    6  * are met:
    7  * 1. Redistributions of source code must retain the above copyright
    8  *    notice, this list of conditions and the following disclaimer.
    9  * 2. Redistributions in binary form must reproduce the above copyright
   10  *    notice, this list of conditions and the following disclaimer in the
   11  *    documentation and/or other materials provided with the distribution.
   12  * 3. Berkeley Software Design Inc's name may not be used to endorse or
   13  *    promote products derived from this software without specific prior
   14  *    written permission.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
   20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  *
   28  * from BSDI: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp
   29  */
   30 /*-
   31  * Copyright (c) 2002 Jake Burkholder.
   32  * Copyright (c) 2007 - 2010 Marius Strobl <marius@FreeBSD.org>
   33  * All rights reserved.
   34  *
   35  * Redistribution and use in source and binary forms, with or without
   36  * modification, are permitted provided that the following conditions
   37  * are met:
   38  * 1. Redistributions of source code must retain the above copyright
   39  *    notice, this list of conditions and the following disclaimer.
   40  * 2. Redistributions in binary form must reproduce the above copyright
   41  *    notice, this list of conditions and the following disclaimer in the
   42  *    documentation and/or other materials provided with the distribution.
   43  *
   44  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   45  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   46  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   47  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   48  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   49  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   50  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   51  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   52  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   53  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   54  * SUCH DAMAGE.
   55  */
   56 
   57 #include <sys/cdefs.h>
   58 __FBSDID("$FreeBSD$");
   59 
   60 #include <sys/param.h>
   61 #include <sys/systm.h>
   62 #include <sys/lock.h>
   63 #include <sys/kdb.h>
   64 #include <sys/kernel.h>
   65 #include <sys/ktr.h>
   66 #include <sys/mutex.h>
   67 #include <sys/pcpu.h>
   68 #include <sys/proc.h>
   69 #include <sys/sched.h>
   70 #include <sys/smp.h>
   71 
   72 #include <vm/vm.h>
   73 #include <vm/vm_param.h>
   74 #include <vm/pmap.h>
   75 #include <vm/vm_kern.h>
   76 #include <vm/vm_extern.h>
   77 #include <vm/vm_map.h>
   78 
   79 #include <dev/ofw/openfirm.h>
   80 
   81 #include <machine/asi.h>
   82 #include <machine/atomic.h>
   83 #include <machine/bus.h>
   84 #include <machine/cpu.h>
   85 #include <machine/md_var.h>
   86 #include <machine/metadata.h>
   87 #include <machine/ofw_machdep.h>
   88 #include <machine/pcb.h>
   89 #include <machine/smp.h>
   90 #include <machine/tick.h>
   91 #include <machine/tlb.h>
   92 #include <machine/tsb.h>
   93 #include <machine/tte.h>
   94 #include <machine/ver.h>
   95 
   96 #define SUNW_STARTCPU           "SUNW,start-cpu"
   97 #define SUNW_STOPSELF           "SUNW,stop-self"
   98 
   99 static ih_func_t cpu_ipi_ast;
  100 static ih_func_t cpu_ipi_preempt;
  101 static ih_func_t cpu_ipi_stop;
  102 
  103 /*
  104  * Argument area used to pass data to non-boot processors as they start up.
  105  * This must be statically initialized with a known invalid CPU module ID,
  106  * since the other processors will use it before the boot CPU enters the
  107  * kernel.
  108  */
  109 struct  cpu_start_args cpu_start_args = { 0, -1, -1, 0, 0, 0 };
  110 struct  ipi_cache_args ipi_cache_args;
  111 struct  ipi_rd_args ipi_rd_args;
  112 struct  ipi_tlb_args ipi_tlb_args;
  113 struct  pcb stoppcbs[MAXCPU];
  114 
  115 cpu_ipi_selected_t *cpu_ipi_selected;
  116 cpu_ipi_single_t *cpu_ipi_single;
  117 
  118 static vm_offset_t mp_tramp;
  119 static u_int cpuid_to_mid[MAXCPU];
  120 static int isjbus;
  121 static volatile cpumask_t shutdown_cpus;
  122 
  123 static void ap_count(phandle_t node, u_int mid, u_int cpu_impl);
  124 static void ap_start(phandle_t node, u_int mid, u_int cpu_impl);
  125 static void cpu_mp_unleash(void *v);
  126 static void foreach_ap(phandle_t node, void (*func)(phandle_t node,
  127     u_int mid, u_int cpu_impl));
  128 static void sun4u_startcpu(phandle_t cpu, void *func, u_long arg);
  129 
  130 static cpu_ipi_selected_t cheetah_ipi_selected;
  131 static cpu_ipi_single_t cheetah_ipi_single;
  132 static cpu_ipi_selected_t jalapeno_ipi_selected;
  133 static cpu_ipi_single_t jalapeno_ipi_single;
  134 static cpu_ipi_selected_t spitfire_ipi_selected;
  135 static cpu_ipi_single_t spitfire_ipi_single;
  136 
  137 SYSINIT(cpu_mp_unleash, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);
  138 
  139 CTASSERT(MAXCPU <= IDR_CHEETAH_MAX_BN_PAIRS);
  140 CTASSERT(MAXCPU <= sizeof(u_int) * NBBY);
  141 CTASSERT(MAXCPU <= sizeof(int) * NBBY);
  142 
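       /*
        * Set up the trampoline the APs will run out of the firmware: claim a
        * page via OpenFirmware, copy mp_tramp_code into it, patch in the
        * number of locked kernel TLB slots, the address of mp_startup and the
        * TTEs mapping the kernel, and flush the page so the patched
        * instructions become visible.  Also select the IPI delivery functions
        * matching the CPU implementation.
        */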
  143 void
  144 mp_init(u_int cpu_impl)
  145 {
  146         struct tte *tp;
  147         int i;
  148 
  149         mp_tramp = (vm_offset_t)OF_claim(NULL, PAGE_SIZE, PAGE_SIZE);
  150         if (mp_tramp == (vm_offset_t)-1)
  151                 panic("%s", __func__);
  152         bcopy(mp_tramp_code, (void *)mp_tramp, mp_tramp_code_len);
  153         *(vm_offset_t *)(mp_tramp + mp_tramp_tlb_slots) = kernel_tlb_slots;
  154         *(vm_offset_t *)(mp_tramp + mp_tramp_func) = (vm_offset_t)mp_startup;
  155         tp = (struct tte *)(mp_tramp + mp_tramp_code_len);
  156         for (i = 0; i < kernel_tlb_slots; i++) {
  157                 tp[i].tte_vpn = TV_VPN(kernel_tlbs[i].te_va, TS_4M);
  158                 tp[i].tte_data = TD_V | TD_4M | TD_PA(kernel_tlbs[i].te_pa) |
  159                     TD_L | TD_CP | TD_CV | TD_P | TD_W;
  160         }
  161         for (i = 0; i < PAGE_SIZE; i += sizeof(vm_offset_t))
  162                 flush(mp_tramp + i);
  163 
  164         /*
   165          * On UP systems, cpu_ipi_selected() can be called even though
   166          * cpu_mp_start() was not, so initialize these here.
  167          */
  168         if (cpu_impl == CPU_IMPL_ULTRASPARCIIIi ||
  169             cpu_impl == CPU_IMPL_ULTRASPARCIIIip) {
  170                 isjbus = 1;
  171                 cpu_ipi_selected = jalapeno_ipi_selected;
  172                 cpu_ipi_single = jalapeno_ipi_single;
  173         } else if (cpu_impl == CPU_IMPL_SPARC64V ||
  174             cpu_impl >= CPU_IMPL_ULTRASPARCIII) {
  175                 cpu_ipi_selected = cheetah_ipi_selected;
  176                 cpu_ipi_single = cheetah_ipi_single;
  177         } else {
  178                 cpu_ipi_selected = spitfire_ipi_selected;
  179                 cpu_ipi_single = spitfire_ipi_single;
  180         }
  181 }
  182 
  183 static void
  184 foreach_ap(phandle_t node, void (*func)(phandle_t node, u_int mid,
  185     u_int cpu_impl))
  186 {
  187         char type[sizeof("cpu")];
  188         phandle_t child;
  189         u_int cpuid;
  190         uint32_t cpu_impl;
  191 
  192         /* There's no need to traverse the whole OFW tree twice. */
  193         if (mp_maxid > 0 && mp_ncpus >= mp_maxid + 1)
  194                 return;
  195 
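               /*
                * Recurse depth-first over the OFW device tree and invoke the
                * callback for every leaf node of device_type "cpu", skipping
                * the boot CPU, which is identified by its module ID matching
                * our own.
                */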
  196         for (; node != 0; node = OF_peer(node)) {
  197                 child = OF_child(node);
  198                 if (child > 0)
  199                         foreach_ap(child, func);
  200                 else {
  201                         if (OF_getprop(node, "device_type", type,
  202                             sizeof(type)) <= 0)
  203                                 continue;
  204                         if (strcmp(type, "cpu") != 0)
  205                                 continue;
  206                         if (OF_getprop(node, "implementation#", &cpu_impl,
  207                             sizeof(cpu_impl)) <= 0)
  208                                 panic("%s: couldn't determine CPU "
  209                                     "implementation", __func__);
  210                         if (OF_getprop(node, cpu_cpuid_prop(cpu_impl), &cpuid,
  211                             sizeof(cpuid)) <= 0)
  212                                 panic("%s: couldn't determine CPU module ID",
  213                                     __func__);
  214                         if (cpuid == PCPU_GET(mid))
  215                                 continue;
  216                         (*func)(node, cpuid, cpu_impl);
  217                 }
  218         }
  219 }
  220 
  221 /*
  222  * Probe for other CPUs.
  223  */
  224 void
  225 cpu_mp_setmaxid()
  226 {
  227 
  228         all_cpus = 1 << curcpu;
  229         mp_ncpus = 1;
  230         mp_maxid = 0;
  231 
  232         foreach_ap(OF_child(OF_peer(0)), ap_count);
  233 }
  234 
  235 static void
  236 ap_count(phandle_t node __unused, u_int mid __unused, u_int cpu_impl __unused)
  237 {
  238 
  239         mp_maxid++;
  240 }
  241 
  242 int
  243 cpu_mp_probe(void)
  244 {
  245 
  246         return (mp_maxid > 0);
  247 }
  248 
  249 struct cpu_group *
  250 cpu_topo(void)
  251 {
  252 
  253         return (smp_topo_none());
  254 }
  255 
  256 static void
  257 sun4u_startcpu(phandle_t cpu, void *func, u_long arg)
  258 {
  259         static struct {
  260                 cell_t  name;
  261                 cell_t  nargs;
  262                 cell_t  nreturns;
  263                 cell_t  cpu;
  264                 cell_t  func;
  265                 cell_t  arg;
  266         } args = {
  267                 (cell_t)SUNW_STARTCPU,
  268                 3,
  269         };
  270 
  271         args.cpu = cpu;
  272         args.func = (cell_t)func;
  273         args.arg = (cell_t)arg;
  274         ofw_entry(&args);
  275 }
  276 
  277 /*
  278  * Fire up any non-boot processors.
  279  */
  280 void
  281 cpu_mp_start(void)
  282 {
  283 
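               /*
                * Register the handlers for the individual IPI types at their
                * respective PILs, record the boot CPU's module ID and then
                * walk the OFW tree starting the application processors.
                */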
  284         intr_setup(PIL_AST, cpu_ipi_ast, -1, NULL, NULL);
  285         intr_setup(PIL_RENDEZVOUS, (ih_func_t *)smp_rendezvous_action,
  286             -1, NULL, NULL);
  287         intr_setup(PIL_STOP, cpu_ipi_stop, -1, NULL, NULL);
  288         intr_setup(PIL_PREEMPT, cpu_ipi_preempt, -1, NULL, NULL);
  289 
  290         cpuid_to_mid[curcpu] = PCPU_GET(mid);
  291 
  292         foreach_ap(OF_child(OF_peer(0)), ap_start);
  293         KASSERT(!isjbus || mp_ncpus <= IDR_JALAPENO_MAX_BN_PAIRS,
  294             ("%s: can only IPI a maximum of %d JBus-CPUs",
  295             __func__, IDR_JALAPENO_MAX_BN_PAIRS));
  296         PCPU_SET(other_cpus, all_cpus & ~(1 << curcpu));
  297         smp_active = 1;
  298 }
  299 
  300 static void
  301 ap_start(phandle_t node, u_int mid, u_int cpu_impl)
  302 {
  303         volatile struct cpu_start_args *csa;
  304         struct pcpu *pc;
  305         register_t s;
  306         vm_offset_t va;
  307         u_int cpuid;
  308         uint32_t clock;
  309 
  310         if (mp_ncpus > MAXCPU)
  311                 return;
  312 
  313         if (OF_getprop(node, "clock-frequency", &clock, sizeof(clock)) <= 0)
  314                 panic("%s: couldn't determine CPU frequency", __func__);
  315         if (clock != PCPU_GET(clock))
  316                 hardclock_use_stick = 1;
  317 
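               /*
                * Have the firmware start the AP in the trampoline and perform
                * a simple handshake via the shared cpu_start_args area: the AP
                * advances csa_state as it progresses while the boot CPU
                * records its own TICK (and, on USIII and SPARC64 V, STICK)
                * value so the AP can synchronize its timers before entering
                * the kernel.
                */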
  318         csa = &cpu_start_args;
  319         csa->csa_state = 0;
  320         sun4u_startcpu(node, (void *)mp_tramp, 0);
  321         s = intr_disable();
  322         while (csa->csa_state != CPU_TICKSYNC)
  323                 ;
  324         membar(StoreLoad);
  325         csa->csa_tick = rd(tick);
  326         if (cpu_impl == CPU_IMPL_SPARC64V ||
  327             cpu_impl >= CPU_IMPL_ULTRASPARCIII) {
  328                 while (csa->csa_state != CPU_STICKSYNC)
  329                         ;
  330                 membar(StoreLoad);
  331                 csa->csa_stick = rdstick();
  332         }
  333         while (csa->csa_state != CPU_INIT)
  334                 ;
  335         csa->csa_tick = csa->csa_stick = 0;
  336         intr_restore(s);
  337 
  338         cpuid = mp_ncpus++;
  339         cpuid_to_mid[cpuid] = mid;
  340         cpu_identify(csa->csa_ver, clock, cpuid);
  341 
  342         va = kmem_alloc(kernel_map, PCPU_PAGES * PAGE_SIZE);
  343         pc = (struct pcpu *)(va + (PCPU_PAGES * PAGE_SIZE)) - 1;
  344         pcpu_init(pc, cpuid, sizeof(*pc));
  345         dpcpu_init((void *)kmem_alloc(kernel_map, DPCPU_SIZE), cpuid);
  346         pc->pc_addr = va;
  347         pc->pc_clock = clock;
  348         pc->pc_impl = cpu_impl;
  349         pc->pc_mid = mid;
  350         pc->pc_node = node;
  351 
  352         cache_init(pc);
  353 
  354         all_cpus |= 1 << cpuid;
  355         intr_add_cpu(cpuid);
  356 }
  357 
  358 void
  359 cpu_mp_announce(void)
  360 {
  361 
  362 }
  363 
  364 static void
  365 cpu_mp_unleash(void *v)
  366 {
  367         volatile struct cpu_start_args *csa;
  368         struct pcpu *pc;
  369         register_t s;
  370         vm_offset_t va;
  371         vm_paddr_t pa;
  372         u_int ctx_inc;
  373         u_int ctx_min;
  374         int i;
  375 
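               /*
                * Divide the user TLB context number range evenly among all
                * CPUs; each CPU allocates contexts only from its own share,
                * so context allocation needs no synchronization between CPUs.
                */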
  376         ctx_min = TLB_CTX_USER_MIN;
  377         ctx_inc = (TLB_CTX_USER_MAX - 1) / mp_ncpus;
  378         csa = &cpu_start_args;
  379         csa->csa_count = mp_ncpus;
  380         SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
  381                 pc->pc_tlb_ctx = ctx_min;
  382                 pc->pc_tlb_ctx_min = ctx_min;
  383                 pc->pc_tlb_ctx_max = ctx_min + ctx_inc;
  384                 ctx_min += ctx_inc;
  385 
  386                 if (pc->pc_cpuid == curcpu)
  387                         continue;
  388                 KASSERT(pc->pc_idlethread != NULL,
  389                     ("%s: idlethread", __func__));
  390                 pc->pc_curthread = pc->pc_idlethread;
  391                 pc->pc_curpcb = pc->pc_curthread->td_pcb;
  392                 for (i = 0; i < PCPU_PAGES; i++) {
  393                         va = pc->pc_addr + i * PAGE_SIZE;
  394                         pa = pmap_kextract(va);
  395                         if (pa == 0)
  396                                 panic("%s: pmap_kextract", __func__);
  397                         csa->csa_ttes[i].tte_vpn = TV_VPN(va, TS_8K);
  398                         csa->csa_ttes[i].tte_data = TD_V | TD_8K | TD_PA(pa) |
  399                             TD_L | TD_CP | TD_CV | TD_P | TD_W;
  400                 }
  401                 csa->csa_state = 0;
  402                 csa->csa_pcpu = pc->pc_addr;
  403                 csa->csa_mid = pc->pc_mid;
  404                 s = intr_disable();
  405                 while (csa->csa_state != CPU_BOOTSTRAP)
  406                         ;
  407                 intr_restore(s);
  408         }
  409 
  410         membar(StoreLoad);
  411         csa->csa_count = 0;
  412         smp_started = 1;
  413 }
  414 
  415 void
  416 cpu_mp_bootstrap(struct pcpu *pc)
  417 {
  418         volatile struct cpu_start_args *csa;
  419 
  420         csa = &cpu_start_args;
  421 
  422         /* Do CPU-specific initialization. */
  423         if (pc->pc_impl >= CPU_IMPL_ULTRASPARCIII)
  424                 cheetah_init(pc->pc_impl);
  425         else if (pc->pc_impl == CPU_IMPL_SPARC64V)
  426                 zeus_init(pc->pc_impl);
  427 
  428         /*
   429          * Enable the caches.  Note that this may include applying workarounds.
  430          */
  431         cache_enable(pc->pc_impl);
  432 
  433         /*
  434          * Clear (S)TICK timer(s) (including NPT) and ensure they are stopped.
  435          */
  436         tick_clear(pc->pc_impl);
  437         tick_stop(pc->pc_impl);
  438 
  439         /* Set the kernel context. */
  440         pmap_set_kctx();
  441 
  442         /* Lock the kernel TSB in the TLB if necessary. */
  443         if (tsb_kernel_ldd_phys == 0)
  444                 pmap_map_tsb();
  445 
  446         /*
  447          * Flush all non-locked TLB entries possibly left over by the
  448          * firmware.
  449          */
  450         tlb_flush_nonlocked();
  451 
  452         /*
  453          * Enable interrupts.
   454          * Note that the PIL will be lowered indirectly via sched_throw(NULL)
   455          * when the fake spinlock held by the idle thread is eventually released.
  456          */
  457         wrpr(pstate, 0, PSTATE_KERNEL);
  458 
  459         /* Start the (S)TICK interrupts. */
  460         tick_start();
  461 
  462         smp_cpus++;
  463         KASSERT(curthread != NULL, ("%s: curthread", __func__));
  464         PCPU_SET(other_cpus, all_cpus & ~(1 << curcpu));
  465         printf("SMP: AP CPU #%d Launched!\n", curcpu);
  466 
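               /*
                * Signal the boot CPU that this AP is up by decrementing
                * csa_count and setting CPU_BOOTSTRAP, then spin until the
                * boot CPU releases all APs at once by zeroing csa_count in
                * cpu_mp_unleash().
                */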
  467         csa->csa_count--;
  468         membar(StoreLoad);
  469         csa->csa_state = CPU_BOOTSTRAP;
  470         while (csa->csa_count != 0)
  471                 ;
  472 
  473         /* Ok, now enter the scheduler. */
  474         sched_throw(NULL);
  475 }
  476 
  477 void
  478 cpu_mp_shutdown(void)
  479 {
  480         int i;
  481 
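               /*
                * Mark all other CPUs for shutdown and stop any that are not
                * already stopped; CPUs parked in cpu_ipi_stop() notice their
                * bit in shutdown_cpus, clear it and spin with interrupts
                * disabled, which is what the bounded loop below waits for.
                */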
  482         critical_enter();
  483         shutdown_cpus = PCPU_GET(other_cpus);
  484         if (stopped_cpus != PCPU_GET(other_cpus))       /* XXX */
  485                 stop_cpus(stopped_cpus ^ PCPU_GET(other_cpus));
  486         i = 0;
  487         while (shutdown_cpus != 0) {
  488                 if (i++ > 100000) {
  489                         printf("timeout shutting down CPUs.\n");
  490                         break;
  491                 }
  492         }
  493         critical_exit();
  494 }
  495 
  496 static void
  497 cpu_ipi_ast(struct trapframe *tf __unused)
  498 {
  499 
  500 }
  501 
  502 static void
  503 cpu_ipi_stop(struct trapframe *tf __unused)
  504 {
  505 
  506         CTR2(KTR_SMP, "%s: stopped %d", __func__, curcpu);
  507         savectx(&stoppcbs[curcpu]);
  508         atomic_set_acq_int(&stopped_cpus, PCPU_GET(cpumask));
  509         while ((started_cpus & PCPU_GET(cpumask)) == 0) {
  510                 if ((shutdown_cpus & PCPU_GET(cpumask)) != 0) {
  511                         atomic_clear_int(&shutdown_cpus, PCPU_GET(cpumask));
  512                         (void)intr_disable();
  513                         for (;;)
  514                                 ;
  515                 }
  516         }
  517         atomic_clear_rel_int(&started_cpus, PCPU_GET(cpumask));
  518         atomic_clear_rel_int(&stopped_cpus, PCPU_GET(cpumask));
  519         CTR2(KTR_SMP, "%s: restarted %d", __func__, curcpu);
  520 }
  521 
  522 static void
  523 cpu_ipi_preempt(struct trapframe *tf)
  524 {
  525 
  526         sched_preempt(curthread);
  527 }
  528 
  529 static void
  530 spitfire_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2)
  531 {
  532         u_int cpu;
  533 
  534         while (cpus) {
  535                 cpu = ffs(cpus) - 1;
  536                 cpus &= ~(1 << cpu);
  537                 spitfire_ipi_single(cpu, d0, d1, d2);
  538         }
  539 }
  540 
  541 static void
  542 spitfire_ipi_single(u_int cpu, u_long d0, u_long d1, u_long d2)
  543 {
  544         register_t s;
  545         u_long ids;
  546         u_int mid;
  547         int i;
  548 
  549         KASSERT(cpu != curcpu, ("%s: CPU can't IPI itself", __func__));
  550         KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) & IDR_BUSY) == 0,
  551             ("%s: outstanding dispatch", __func__));
  552         mid = cpuid_to_mid[cpu];
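               /*
                * Stage the three interrupt data words in the outgoing dispatch
                * registers, trigger the dispatch to the target module ID and
                * poll the dispatch status register until the BUSY bit clears;
                * a set NACK bit means the target did not accept the vector and
                * the dispatch is retried.
                */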
  553         for (i = 0; i < IPI_RETRIES; i++) {
  554                 s = intr_disable();
  555                 stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0);
  556                 stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1);
  557                 stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
  558                 membar(Sync);
  559                 stxa(AA_INTR_SEND | (mid << IDC_ITID_SHIFT),
  560                     ASI_SDB_INTR_W, 0);
  561                 /*
  562                  * Workaround for SpitFire erratum #54; do a dummy read
  563                  * from a SDB internal register before the MEMBAR #Sync
  564                  * for the write to ASI_SDB_INTR_W (requiring another
  565                  * MEMBAR #Sync in order to make sure the write has
  566                  * occurred before the load).
  567                  */
  568                 membar(Sync);
  569                 (void)ldxa(AA_SDB_CNTL_HIGH, ASI_SDB_CONTROL_R);
  570                 membar(Sync);
  571                 while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) &
  572                     IDR_BUSY) != 0)
  573                         ;
  574                 intr_restore(s);
  575                 if ((ids & (IDR_BUSY | IDR_NACK)) == 0)
  576                         return;
  577                 /*
  578                  * Leave interrupts enabled for a bit before retrying
  579                  * in order to avoid deadlocks if the other CPU is also
  580                  * trying to send an IPI.
  581                  */
  582                 DELAY(2);
  583         }
  584         if (kdb_active != 0 || panicstr != NULL)
  585                 printf("%s: couldn't send IPI to module 0x%u\n",
  586                     __func__, mid);
  587         else
  588                 panic("%s: couldn't send IPI to module 0x%u",
  589                     __func__, mid);
  590 }
  591 
  592 static void
  593 cheetah_ipi_single(u_int cpu, u_long d0, u_long d1, u_long d2)
  594 {
  595         register_t s;
  596         u_long ids;
  597         u_int mid;
  598         int i;
  599 
  600         KASSERT(cpu != curcpu, ("%s: CPU can't IPI itself", __func__));
  601         KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) &
  602             IDR_CHEETAH_ALL_BUSY) == 0,
  603             ("%s: outstanding dispatch", __func__));
  604         mid = cpuid_to_mid[cpu];
  605         for (i = 0; i < IPI_RETRIES; i++) {
  606                 s = intr_disable();
  607                 stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0);
  608                 stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1);
  609                 stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
  610                 membar(Sync);
  611                 stxa(AA_INTR_SEND | (mid << IDC_ITID_SHIFT),
  612                     ASI_SDB_INTR_W, 0);
  613                 membar(Sync);
  614                 while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) &
  615                     IDR_BUSY) != 0)
  616                         ;
  617                 intr_restore(s);
  618                 if ((ids & (IDR_BUSY | IDR_NACK)) == 0)
  619                         return;
  620                 /*
  621                  * Leave interrupts enabled for a bit before retrying
  622                  * in order to avoid deadlocks if the other CPU is also
  623                  * trying to send an IPI.
  624                  */
  625                 DELAY(2);
  626         }
  627         if (kdb_active != 0 || panicstr != NULL)
  628                 printf("%s: couldn't send IPI to module 0x%u\n",
  629                     __func__, mid);
  630         else
  631                 panic("%s: couldn't send IPI to module 0x%u",
  632                     __func__, mid);
  633 }
  634 
  635 static void
  636 cheetah_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2)
  637 {
  638         register_t s;
  639         u_long ids;
  640         u_int bnp;
  641         u_int cpu;
  642         int i;
  643 
  644         KASSERT((cpus & (1 << curcpu)) == 0,
  645             ("%s: CPU can't IPI itself", __func__));
  646         KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) &
  647             IDR_CHEETAH_ALL_BUSY) == 0,
  648             ("%s: outstanding dispatch", __func__));
  649         if (cpus == 0)
  650                 return;
  651         ids = 0;
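               /*
                * Cheetah-class CPUs support multiple outstanding dispatches;
                * every target is assigned its own busy/nack pair in the
                * dispatch status register, so the vector is sent to all
                * requested CPUs in one go and the per-target NACK bits
                * determine which ones still need a retry.
                */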
  652         for (i = 0; i < IPI_RETRIES * mp_ncpus; i++) {
  653                 s = intr_disable();
  654                 stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0);
  655                 stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1);
  656                 stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
  657                 membar(Sync);
  658                 bnp = 0;
  659                 for (cpu = 0; cpu < mp_ncpus; cpu++) {
  660                         if ((cpus & (1 << cpu)) != 0) {
  661                                 stxa(AA_INTR_SEND | (cpuid_to_mid[cpu] <<
  662                                     IDC_ITID_SHIFT) | bnp << IDC_BN_SHIFT,
  663                                     ASI_SDB_INTR_W, 0);
  664                                 membar(Sync);
  665                                 bnp++;
  666                         }
  667                 }
  668                 while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) &
  669                     IDR_CHEETAH_ALL_BUSY) != 0)
  670                         ;
  671                 intr_restore(s);
  672                 if ((ids &
  673                     (IDR_CHEETAH_ALL_BUSY | IDR_CHEETAH_ALL_NACK)) == 0)
  674                         return;
  675                 bnp = 0;
  676                 for (cpu = 0; cpu < mp_ncpus; cpu++) {
  677                         if ((cpus & (1 << cpu)) != 0) {
  678                                 if ((ids & (IDR_NACK << (2 * bnp))) == 0)
  679                                         cpus &= ~(1 << cpu);
  680                                 bnp++;
  681                         }
  682                 }
  683                 /*
  684                  * On at least Fire V880 we may receive IDR_NACKs for
  685                  * CPUs we actually haven't tried to send an IPI to,
  686                  * but which apparently can be safely ignored.
  687                  */
  688                 if (cpus == 0)
  689                         return;
  690                 /*
  691                  * Leave interrupts enabled for a bit before retrying
  692                  * in order to avoid deadlocks if the other CPUs are
  693                  * also trying to send IPIs.
  694                  */
  695                 DELAY(2 * mp_ncpus);
  696         }
  697         if (kdb_active != 0 || panicstr != NULL)
   698                 printf("%s: couldn't send IPI (cpus=0x%x ids=0x%lx)\n",
  699                     __func__, cpus, ids);
  700         else
   701                 panic("%s: couldn't send IPI (cpus=0x%x ids=0x%lx)",
  702                     __func__, cpus, ids);
  703 }
  704 
  705 static void
  706 jalapeno_ipi_single(u_int cpu, u_long d0, u_long d1, u_long d2)
  707 {
  708         register_t s;
  709         u_long ids;
  710         u_int busy, busynack, mid;
  711         int i;
  712 
  713         KASSERT(cpu != curcpu, ("%s: CPU can't IPI itself", __func__));
  714         KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) &
  715             IDR_CHEETAH_ALL_BUSY) == 0,
  716             ("%s: outstanding dispatch", __func__));
  717         mid = cpuid_to_mid[cpu];
  718         busy = IDR_BUSY << (2 * mid);
  719         busynack = (IDR_BUSY | IDR_NACK) << (2 * mid);
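               /*
                * On JBus CPUs the busy/nack pair belonging to a target is
                * indexed by its module ID rather than by dispatch slot, so the
                * masks to poll for are derived from the target MID up front.
                */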
  720         for (i = 0; i < IPI_RETRIES; i++) {
  721                 s = intr_disable();
  722                 stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0);
  723                 stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1);
  724                 stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
  725                 membar(Sync);
  726                 stxa(AA_INTR_SEND | (mid << IDC_ITID_SHIFT),
  727                     ASI_SDB_INTR_W, 0);
  728                 membar(Sync);
  729                 while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) &
  730                     busy) != 0)
  731                         ;
  732                 intr_restore(s);
  733                 if ((ids & busynack) == 0)
  734                         return;
  735                 /*
  736                  * Leave interrupts enabled for a bit before retrying
  737                  * in order to avoid deadlocks if the other CPU is also
  738                  * trying to send an IPI.
  739                  */
  740                 DELAY(2);
  741         }
  742         if (kdb_active != 0 || panicstr != NULL)
  743                 printf("%s: couldn't send IPI to module 0x%u\n",
  744                     __func__, mid);
  745         else
  746                 panic("%s: couldn't send IPI to module 0x%u",
  747                     __func__, mid);
  748 }
  749 
  750 static void
  751 jalapeno_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2)
  752 {
  753         register_t s;
  754         u_long ids;
  755         u_int cpu;
  756         int i;
  757 
  758         KASSERT((cpus & (1 << curcpu)) == 0,
  759             ("%s: CPU can't IPI itself", __func__));
  760         KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) &
  761             IDR_CHEETAH_ALL_BUSY) == 0,
  762             ("%s: outstanding dispatch", __func__));
  763         if (cpus == 0)
  764                 return;
  765         ids = 0;
  766         for (i = 0; i < IPI_RETRIES * mp_ncpus; i++) {
  767                 s = intr_disable();
  768                 stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0);
  769                 stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1);
  770                 stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
  771                 membar(Sync);
  772                 for (cpu = 0; cpu < mp_ncpus; cpu++) {
  773                         if ((cpus & (1 << cpu)) != 0) {
  774                                 stxa(AA_INTR_SEND | (cpuid_to_mid[cpu] <<
  775                                     IDC_ITID_SHIFT), ASI_SDB_INTR_W, 0);
  776                                 membar(Sync);
  777                         }
  778                 }
  779                 while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) &
  780                     IDR_CHEETAH_ALL_BUSY) != 0)
  781                         ;
  782                 intr_restore(s);
  783                 if ((ids &
  784                     (IDR_CHEETAH_ALL_BUSY | IDR_CHEETAH_ALL_NACK)) == 0)
  785                         return;
  786                 for (cpu = 0; cpu < mp_ncpus; cpu++)
  787                         if ((cpus & (1 << cpu)) != 0)
  788                                 if ((ids & (IDR_NACK <<
  789                                     (2 * cpuid_to_mid[cpu]))) == 0)
  790                                         cpus &= ~(1 << cpu);
  791                 /*
  792                  * Leave interrupts enabled for a bit before retrying
  793                  * in order to avoid deadlocks if the other CPUs are
  794                  * also trying to send IPIs.
  795                  */
  796                 DELAY(2 * mp_ncpus);
  797         }
  798         if (kdb_active != 0 || panicstr != NULL)
   799                 printf("%s: couldn't send IPI (cpus=0x%x ids=0x%lx)\n",
  800                     __func__, cpus, ids);
  801         else
   802                 panic("%s: couldn't send IPI (cpus=0x%x ids=0x%lx)",
  803                     __func__, cpus, ids);
  804 }
