The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/sparc64/sparc64/mp_machdep.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
    3  *
    4  * Redistribution and use in source and binary forms, with or without
    5  * modification, are permitted provided that the following conditions
    6  * are met:
    7  * 1. Redistributions of source code must retain the above copyright
    8  *    notice, this list of conditions and the following disclaimer.
    9  * 2. Redistributions in binary form must reproduce the above copyright
   10  *    notice, this list of conditions and the following disclaimer in the
   11  *    documentation and/or other materials provided with the distribution.
   12  * 3. Berkeley Software Design Inc's name may not be used to endorse or
   13  *    promote products derived from this software without specific prior
   14  *    written permission.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
   20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  *
   28  * from BSDI: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp
   29  */
   30 /*-
   31  * Copyright (c) 2002 Jake Burkholder.
   32  * All rights reserved.
   33  *
   34  * Redistribution and use in source and binary forms, with or without
   35  * modification, are permitted provided that the following conditions
   36  * are met:
   37  * 1. Redistributions of source code must retain the above copyright
   38  *    notice, this list of conditions and the following disclaimer.
   39  * 2. Redistributions in binary form must reproduce the above copyright
   40  *    notice, this list of conditions and the following disclaimer in the
   41  *    documentation and/or other materials provided with the distribution.
   42  *
   43  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   44  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   45  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   46  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   47  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   48  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   49  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   50  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   51  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   52  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   53  * SUCH DAMAGE.
   54  */
   55 
   56 #include <sys/cdefs.h>
   57 __FBSDID("$FreeBSD$");
   58 
   59 #include <sys/param.h>
   60 #include <sys/systm.h>
   61 #include <sys/lock.h>
   62 #include <sys/kdb.h>
   63 #include <sys/kernel.h>
   64 #include <sys/ktr.h>
   65 #include <sys/mutex.h>
   66 #include <sys/pcpu.h>
   67 #include <sys/proc.h>
   68 #include <sys/sched.h>
   69 #include <sys/smp.h>
   70 
   71 #include <vm/vm.h>
   72 #include <vm/vm_param.h>
   73 #include <vm/pmap.h>
   74 #include <vm/vm_kern.h>
   75 #include <vm/vm_extern.h>
   76 #include <vm/vm_map.h>
   77 
   78 #include <dev/ofw/openfirm.h>
   79 
   80 #include <machine/asi.h>
   81 #include <machine/atomic.h>
   82 #include <machine/bus.h>
   83 #include <machine/md_var.h>
   84 #include <machine/metadata.h>
   85 #include <machine/ofw_machdep.h>
   86 #include <machine/pcb.h>
   87 #include <machine/smp.h>
   88 #include <machine/tick.h>
   89 #include <machine/tlb.h>
   90 #include <machine/tte.h>
   91 #include <machine/ver.h>
   92 
/* IPI handlers registered via intr_setup() in cpu_mp_start(). */
static ih_func_t cpu_ipi_ast;
static ih_func_t cpu_ipi_stop;

/*
 * Argument area used to pass data to non-boot processors as they start up.
 * This must be statically initialized with a known invalid CPU module ID,
 * since the other processors will use it before the boot CPU enters the
 * kernel.
 */
struct	cpu_start_args cpu_start_args = { 0, -1, -1, 0, 0 };
struct	ipi_cache_args ipi_cache_args;
struct	ipi_tlb_args ipi_tlb_args;
/* Per-CPU PCBs saved by cpu_ipi_stop() when a CPU is stopped. */
struct	pcb stoppcbs[MAXCPU];

/*
 * NOTE(review): initialized in cpu_mp_start(); the lock's users live
 * outside this file — presumably it serializes IPI dispatch.
 */
struct	mtx ipi_mtx;

/* IPI dispatch routine, selected in mp_init() by CPU implementation. */
cpu_ipi_selected_t *cpu_ipi_selected;

static vm_offset_t mp_tramp;		/* AP startup trampoline, see mp_init() */
static u_int cpuid_to_mid[MAXCPU];	/* kernel CPU ID -> CPU module ID */
static int isjbus;			/* nonzero on JBus (UltraSPARC-IIIi/IIIi+) */
static volatile u_int shutdown_cpus;	/* CPUs asked to stop in the firmware */

static void cpu_mp_unleash(void *v);
static void spitfire_ipi_send(u_int, u_long, u_long, u_long);
static void sun4u_startcpu(phandle_t cpu, void *func, u_long arg);
static void sun4u_stopself(void);

static cpu_ipi_selected_t cheetah_ipi_selected;
static cpu_ipi_selected_t spitfire_ipi_selected;

/* Finish AP bringup once the scheduler and idle threads exist. */
SYSINIT(cpu_mp_unleash, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);

/* The CPU masks and cheetah busy/nack pairs must be able to hold MAXCPU. */
CTASSERT(MAXCPU <= IDR_CHEETAH_MAX_BN_PAIRS);
CTASSERT(MAXCPU <= sizeof(u_int) * NBBY);
CTASSERT(MAXCPU <= sizeof(int) * NBBY);
  129 
/*
 * Set up the AP startup trampoline and select the IPI delivery routine
 * matching the CPU implementation.  Called on the boot CPU only.
 */
void
mp_init(void)
{
	struct tte *tp;
	int i;

	/*
	 * Claim a naturally aligned page from the firmware and copy the
	 * trampoline code into it.
	 */
	mp_tramp = (vm_offset_t)OF_claim(NULL, PAGE_SIZE, PAGE_SIZE);
	if (mp_tramp == (vm_offset_t)-1)
		panic("%s", __func__);
	bcopy(mp_tramp_code, (void *)mp_tramp, mp_tramp_code_len);
	/*
	 * Patch the trampoline with the number of locked kernel TLB slots
	 * and the kernel entry point the APs should jump to.
	 */
	*(vm_offset_t *)(mp_tramp + mp_tramp_tlb_slots) = kernel_tlb_slots;
	*(vm_offset_t *)(mp_tramp + mp_tramp_func) = (vm_offset_t)mp_startup;
	/*
	 * Append the locked 4MB kernel TTEs directly after the trampoline
	 * code so the APs can install the kernel mappings before entering
	 * kernel code.
	 */
	tp = (struct tte *)(mp_tramp + mp_tramp_code_len);
	for (i = 0; i < kernel_tlb_slots; i++) {
		tp[i].tte_vpn = TV_VPN(kernel_tlbs[i].te_va, TS_4M);
		tp[i].tte_data = TD_V | TD_4M | TD_PA(kernel_tlbs[i].te_pa) |
		    TD_L | TD_CP | TD_CV | TD_P | TD_W;
	}
	/* Flush the freshly written trampoline page so it is executable. */
	for (i = 0; i < PAGE_SIZE; i += sizeof(vm_offset_t))
		flush(mp_tramp + i);

	/*
	 * On UP systems cpu_ipi_selected() can be called while
	 * cpu_mp_start() wasn't so initialize these here.
	 */
	if (cpu_impl == CPU_IMPL_ULTRASPARCIIIi ||
	    cpu_impl == CPU_IMPL_ULTRASPARCIIIip)
		isjbus = 1;
	if (cpu_impl >= CPU_IMPL_ULTRASPARCIII)
		cpu_ipi_selected = cheetah_ipi_selected;
	else
		cpu_ipi_selected = spitfire_ipi_selected;
}
  163 
  164 /*
  165  * Probe for other CPUs.
  166  */
  167 void
  168 cpu_mp_setmaxid(void)
  169 {
  170         char buf[128];
  171         phandle_t child;
  172         int cpus;
  173 
  174         all_cpus = 1 << curcpu;
  175         mp_ncpus = 1;
  176 
  177         cpus = 0;
  178         for (child = OF_child(OF_peer(0)); child != 0; child = OF_peer(child))
  179                 if (OF_getprop(child, "device_type", buf, sizeof(buf)) > 0 &&
  180                     strcmp(buf, "cpu") == 0)
  181                         cpus++;
  182         mp_maxid = cpus - 1;
  183 }
  184 
  185 int
  186 cpu_mp_probe(void)
  187 {
  188 
  189         return (mp_maxid > 0);
  190 }
  191 
/*
 * Ask the firmware to start the given CPU node executing func(arg),
 * using the "SUNW,start-cpu" client interface service.  The argument
 * block passes three cells (cpu, func, arg) and expects no return
 * values; only the per-call cells are filled in here.
 */
static void
sun4u_startcpu(phandle_t cpu, void *func, u_long arg)
{
	static struct {
		cell_t	name;
		cell_t	nargs;
		cell_t	nreturns;
		cell_t	cpu;
		cell_t	func;
		cell_t	arg;
	} args = {
		(cell_t)"SUNW,start-cpu",
		3,
	};

	args.cpu = cpu;
	args.func = (cell_t)func;
	args.arg = (cell_t)arg;
	openfirmware(&args);
}
  212 
/*
 * Stop the calling CPU by entering the firmware via the
 * "SUNW,stop-self" client interface service.  This is not expected to
 * return; the trailing panic only fires if the firmware call fails.
 */
static void
sun4u_stopself(void)
{
	static struct {
		cell_t	name;
		cell_t	nargs;
		cell_t	nreturns;
	} args = {
		(cell_t)"SUNW,stop-self",
	};

	openfirmware_exit(&args);
	panic("%s: failed.", __func__);
}
  230 
  231 /*
  232  * Fire up any non-boot processors.
  233  */
  234 void
  235 cpu_mp_start(void)
  236 {
  237         char buf[128];
  238         volatile struct cpu_start_args *csa;
  239         struct pcpu *pc;
  240         register_t s;
  241         vm_offset_t va;
  242         phandle_t child;
  243         u_int clock;
  244         u_int mid;
  245         int cpuid;
  246 
  247         mtx_init(&ipi_mtx, "ipi", NULL, MTX_SPIN);
  248 
  249         intr_setup(PIL_AST, cpu_ipi_ast, -1, NULL, NULL);
  250         intr_setup(PIL_RENDEZVOUS, (ih_func_t *)smp_rendezvous_action,
  251             -1, NULL, NULL);
  252         intr_setup(PIL_STOP, cpu_ipi_stop, -1, NULL, NULL);
  253 
  254         cpuid_to_mid[curcpu] = PCPU_GET(mid);
  255 
  256         csa = &cpu_start_args;
  257         for (child = OF_child(OF_peer(0)); child != 0 && mp_ncpus <= MAXCPU;
  258             child = OF_peer(child)) {
  259                 if (OF_getprop(child, "device_type", buf, sizeof(buf)) <= 0 ||
  260                     strcmp(buf, "cpu") != 0)
  261                         continue;
  262                 if (OF_getprop(child, cpu_impl < CPU_IMPL_ULTRASPARCIII ?
  263                     "upa-portid" : "portid", &mid, sizeof(mid)) <= 0)
  264                         panic("%s: can't get module ID", __func__);
  265                 if (mid == PCPU_GET(mid))
  266                         continue;
  267                 if (OF_getprop(child, "clock-frequency", &clock,
  268                     sizeof(clock)) <= 0)
  269                         panic("%s: can't get clock", __func__);
  270 
  271                 csa->csa_state = 0;
  272                 sun4u_startcpu(child, (void *)mp_tramp, 0);
  273                 s = intr_disable();
  274                 while (csa->csa_state != CPU_CLKSYNC)
  275                         ;
  276                 membar(StoreLoad);
  277                 csa->csa_tick = rd(tick);
  278                 while (csa->csa_state != CPU_INIT)
  279                         ;
  280                 csa->csa_tick = 0;
  281                 intr_restore(s);
  282 
  283                 cpuid = mp_ncpus++;
  284                 cpuid_to_mid[cpuid] = mid;
  285                 cpu_identify(csa->csa_ver, clock, cpuid);
  286 
  287                 va = kmem_alloc(kernel_map, PCPU_PAGES * PAGE_SIZE);
  288                 pc = (struct pcpu *)(va + (PCPU_PAGES * PAGE_SIZE)) - 1;
  289                 pcpu_init(pc, cpuid, sizeof(*pc));
  290                 pc->pc_addr = va;
  291                 pc->pc_mid = mid;
  292                 pc->pc_node = child;
  293 
  294                 all_cpus |= 1 << cpuid;
  295         }
  296         KASSERT(!isjbus || mp_ncpus <= IDR_JALAPENO_MAX_BN_PAIRS,
  297             ("%s: can only IPI a maximum of %d JBus-CPUs",
  298             __func__, IDR_JALAPENO_MAX_BN_PAIRS));
  299         PCPU_SET(other_cpus, all_cpus & ~(1 << curcpu));
  300         smp_active = 1;
  301 }
  302 
/*
 * Nothing to announce here; the CPUs were already reported via
 * cpu_identify() as they were brought up in cpu_mp_start().
 */
void
cpu_mp_announce(void)
{

}
  308 
/*
 * Second stage of AP bringup, run from a SYSINIT once the scheduler and
 * idle threads exist: partition the user TLB context range among all
 * CPUs, hand each AP the locked mappings for its per-CPU pages and wait
 * for every AP to reach the CPU_BOOTSTRAP state.
 */
static void
cpu_mp_unleash(void *v)
{
	volatile struct cpu_start_args *csa;
	struct pcpu *pc;
	register_t s;
	vm_offset_t va;
	vm_paddr_t pa;
	u_int ctx_min;
	u_int ctx_inc;
	int i;

	/* Give each CPU a disjoint slice of the user TLB context space. */
	ctx_min = TLB_CTX_USER_MIN;
	ctx_inc = (TLB_CTX_USER_MAX - 1) / mp_ncpus;
	csa = &cpu_start_args;
	csa->csa_count = mp_ncpus;
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		pc->pc_tlb_ctx = ctx_min;
		pc->pc_tlb_ctx_min = ctx_min;
		pc->pc_tlb_ctx_max = ctx_min + ctx_inc;
		ctx_min += ctx_inc;

		/* The boot CPU only needs its context range assigned. */
		if (pc->pc_cpuid == curcpu)
			continue;
		KASSERT(pc->pc_idlethread != NULL,
		    ("%s: idlethread", __func__));
		pc->pc_curthread = pc->pc_idlethread;
		pc->pc_curpcb = pc->pc_curthread->td_pcb;
		/*
		 * Build locked 8K TTEs for the AP's per-CPU pages so it can
		 * map them early during its bootstrap.
		 */
		for (i = 0; i < PCPU_PAGES; i++) {
			va = pc->pc_addr + i * PAGE_SIZE;
			pa = pmap_kextract(va);
			if (pa == 0)
				panic("%s: pmap_kextract", __func__);
			csa->csa_ttes[i].tte_vpn = TV_VPN(va, TS_8K);
			csa->csa_ttes[i].tte_data = TD_V | TD_8K | TD_PA(pa) |
			    TD_L | TD_CP | TD_CV | TD_P | TD_W;
		}
		csa->csa_state = 0;
		csa->csa_pcpu = pc->pc_addr;
		/*
		 * NOTE(review): presumably the AP spins matching csa_mid
		 * against its own module ID (the AP side lives in the
		 * trampoline/locore code, not in this file).
		 */
		csa->csa_mid = pc->pc_mid;
		s = intr_disable();
		/* Busy-wait for this AP to report CPU_BOOTSTRAP. */
		while (csa->csa_state != CPU_BOOTSTRAP)
			;
		intr_restore(s);
	}

	membar(StoreLoad);
	/*
	 * Clearing csa_count releases all APs spinning on it in
	 * cpu_mp_bootstrap() so they enter the scheduler together.
	 */
	csa->csa_count = 0;
	smp_started = 1;
}
  359 
/*
 * AP entry point once it is running kernel code with its per-CPU data
 * in place: finish per-CPU setup, rendezvous with the boot CPU through
 * the start args and enter the scheduler.
 */
void
cpu_mp_bootstrap(struct pcpu *pc)
{
	volatile struct cpu_start_args *csa;

	csa = &cpu_start_args;
	pmap_map_tsb();
	cpu_setregs(pc);
	tick_start();

	smp_cpus++;
	KASSERT(curthread != NULL, ("%s: curthread", __func__));
	PCPU_SET(other_cpus, all_cpus & ~(1 << curcpu));
	printf("SMP: AP CPU #%d Launched!\n", curcpu);

	/*
	 * Check in, announce CPU_BOOTSTRAP to cpu_mp_unleash() and wait
	 * until it clears csa_count, releasing all APs at once.
	 */
	csa->csa_count--;
	membar(StoreLoad);
	csa->csa_state = CPU_BOOTSTRAP;
	while (csa->csa_count != 0)
		;

	/* ok, now enter the scheduler */
	sched_throw(NULL);
}
  384 
/*
 * Park all other CPUs in the firmware: flag them for shutdown, stop any
 * that are still running (cpu_ipi_stop() notices shutdown_cpus and
 * calls sun4u_stopself()), then wait with a bounded spin.
 */
void
cpu_mp_shutdown(void)
{
	int i;

	critical_enter();
	shutdown_cpus = PCPU_GET(other_cpus);
	if (stopped_cpus != PCPU_GET(other_cpus))	/* XXX */
		stop_cpus(stopped_cpus ^ PCPU_GET(other_cpus));
	i = 0;
	/* Bounded busy-wait; each CPU clears its bit before stopping. */
	while (shutdown_cpus != 0) {
		if (i++ > 100000) {
			printf("timeout shutting down CPUs.\n");
			break;
		}
	}
	/* XXX: delay a bit to allow the CPUs to actually enter the PROM. */
	DELAY(100000);
	critical_exit();
}
  405 
/*
 * PIL_AST IPI handler.  Intentionally empty; NOTE(review): presumably
 * the AST itself is processed on return from the interrupt, so the
 * handler only needs to exist for the IPI to be delivered.
 */
static void
cpu_ipi_ast(struct trapframe *tf)
{

}
  411 
/*
 * PIL_STOP IPI handler: save this CPU's context into stoppcbs[] and
 * spin until restarted via started_cpus — or, if a shutdown was
 * requested via shutdown_cpus, stop the CPU in the firmware.
 */
static void
cpu_ipi_stop(struct trapframe *tf)
{

	CTR2(KTR_SMP, "%s: stopped %d", __func__, curcpu);
	savectx(&stoppcbs[curcpu]);
	atomic_set_acq_int(&stopped_cpus, PCPU_GET(cpumask));
	while ((started_cpus & PCPU_GET(cpumask)) == 0) {
		if ((shutdown_cpus & PCPU_GET(cpumask)) != 0) {
			/* Acknowledge the shutdown request, then stop. */
			atomic_clear_int(&shutdown_cpus, PCPU_GET(cpumask));
			sun4u_stopself();
		}
	}
	atomic_clear_rel_int(&started_cpus, PCPU_GET(cpumask));
	atomic_clear_rel_int(&stopped_cpus, PCPU_GET(cpumask));
	CTR2(KTR_SMP, "%s: restarted %d", __func__, curcpu);
}
  429 
  430 static void
  431 spitfire_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2)
  432 {
  433         u_int cpu;
  434 
  435         KASSERT((cpus & (1 << curcpu)) == 0,
  436             ("%s: CPU can't IPI itself", __func__));
  437         while (cpus) {
  438                 cpu = ffs(cpus) - 1;
  439                 cpus &= ~(1 << cpu);
  440                 spitfire_ipi_send(cpuid_to_mid[cpu], d0, d1, d2);
  441         }
  442 }
  443 
  444 static void
  445 spitfire_ipi_send(u_int mid, u_long d0, u_long d1, u_long d2)
  446 {
  447         register_t s;
  448         u_long ids;
  449         int i;
  450 
  451         KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) & IDR_BUSY) == 0,
  452             ("%s: outstanding dispatch", __func__));
  453         for (i = 0; i < IPI_RETRIES; i++) {
  454                 s = intr_disable();
  455                 stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0);
  456                 stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1);
  457                 stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
  458                 membar(Sync);
  459                 stxa(AA_INTR_SEND | (mid << IDC_ITID_SHIFT),
  460                     ASI_SDB_INTR_W, 0);
  461                 /*
  462                  * Workaround for SpitFire erratum #54; do a dummy read
  463                  * from a SDB internal register before the MEMBAR #Sync
  464                  * for the write to ASI_SDB_INTR_W (requiring another
  465                  * MEMBAR #Sync in order to make sure the write has
  466                  * occurred before the load).
  467                  */
  468                 membar(Sync);
  469                 (void)ldxa(AA_SDB_CNTL_HIGH, ASI_SDB_CONTROL_R);
  470                 membar(Sync);
  471                 while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) &
  472                     IDR_BUSY) != 0)
  473                         ;
  474                 intr_restore(s);
  475                 if ((ids & (IDR_BUSY | IDR_NACK)) == 0)
  476                         return;
  477                 /*
  478                  * Leave interrupts enabled for a bit before retrying
  479                  * in order to avoid deadlocks if the other CPU is also
  480                  * trying to send an IPI.
  481                  */
  482                 DELAY(2);
  483         }
  484         if (
  485 #ifdef KDB
  486             kdb_active ||
  487 #endif
  488             panicstr != NULL)
  489                 printf("%s: couldn't send IPI to module 0x%u\n",
  490                     __func__, mid);
  491         else
  492                 panic("%s: couldn't send IPI", __func__);
  493 }
  494 
  495 static void
  496 cheetah_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2)
  497 {
  498         register_t s;
  499         u_long ids;
  500         u_int bnp;
  501         u_int cpu;
  502         int i;
  503 
  504         KASSERT((cpus & (1 << curcpu)) == 0,
  505             ("%s: CPU can't IPI itself", __func__));
  506         KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) &
  507             IDR_CHEETAH_ALL_BUSY) == 0,
  508             ("%s: outstanding dispatch", __func__));
  509         if (cpus == 0)
  510                 return;
  511         ids = 0;
  512         for (i = 0; i < IPI_RETRIES * mp_ncpus; i++) {
  513                 s = intr_disable();
  514                 stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0);
  515                 stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1);
  516                 stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2);
  517                 membar(Sync);
  518                 bnp = 0;
  519                 for (cpu = 0; cpu < mp_ncpus; cpu++) {
  520                         if ((cpus & (1 << cpu)) != 0) {
  521                                 stxa(AA_INTR_SEND |
  522                                     (cpuid_to_mid[cpu] << IDC_ITID_SHIFT) |
  523                                     (isjbus ? 0 : bnp << IDC_BN_SHIFT),
  524                                     ASI_SDB_INTR_W, 0);
  525                                 membar(Sync);
  526                                 bnp++;
  527                         }
  528                 }
  529                 while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) &
  530                     IDR_CHEETAH_ALL_BUSY) != 0)
  531                         ;
  532                 intr_restore(s);
  533                 if ((ids & (IDR_CHEETAH_ALL_BUSY | IDR_CHEETAH_ALL_NACK)) == 0)
  534                         return;
  535                 bnp = 0;
  536                 for (cpu = 0; cpu < mp_ncpus; cpu++) {
  537                         if ((cpus & (1 << cpu)) != 0) {
  538                                 if ((ids & (IDR_NACK << (isjbus ?
  539                                     (2 * cpuid_to_mid[cpu]) :
  540                                     (2 * bnp)))) == 0)
  541                                         cpus &= ~(1 << cpu);
  542                                 bnp++;
  543                         }
  544                 }
  545                 /*
  546                  * Leave interrupts enabled for a bit before retrying
  547                  * in order to avoid deadlocks if the other CPUs are
  548                  * also trying to send IPIs.
  549                  */
  550                 DELAY(2 * bnp);
  551         }
  552         if (
  553 #ifdef KDB
  554             kdb_active ||
  555 #endif
  556             panicstr != NULL)
  557                 printf("%s: couldn't send IPI (cpus=0x%u ids=0x%lu)\n",
  558                     __func__, cpus, ids);
  559         else
  560                 panic("%s: couldn't send IPI", __func__);
  561 }
  562 
/*
 * Deliver the given IPI to the CPUs in the cpus mask using the dispatch
 * routine selected in mp_init().
 */
void
ipi_selected(u_int cpus, u_int ipi)
{

	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
}
  569 
/*
 * Not supported: the dispatch routines assert that a CPU cannot IPI
 * itself, so a true all-CPUs broadcast is unavailable; any caller is a
 * bug.
 */
void
ipi_all(u_int ipi)
{

	panic("%s", __func__);
}
  576 
/*
 * Deliver the given IPI to every CPU except the caller.
 */
void
ipi_all_but_self(u_int ipi)
{

	cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)tl_ipi_level, ipi);
}

Cache object: b1d594e8a1fc7a8b9a9e3fb098c3e9c7


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.