FreeBSD/Linux Kernel Cross Reference
sys/mips/nlm/xlp_machdep.c

    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights
    5  * reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions are
    9  * met:
   10  *
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in
   15  *    the documentation and/or other materials provided with the
   16  *    distribution.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND
   19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   21  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE
   22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   28  * THE POSSIBILITY OF SUCH DAMAGE.
   29  *
   30  * NETLOGIC_BSD */
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD$");
   34 
   35 #include "opt_ddb.h"
   36 #include "opt_platform.h"
   37 
   38 #include <sys/param.h>
   39 #include <sys/bus.h>
   40 #include <sys/conf.h>
   41 #include <sys/rtprio.h>
   42 #include <sys/systm.h>
   43 #include <sys/interrupt.h>
   44 #include <sys/limits.h>
   45 #include <sys/lock.h>
   46 #include <sys/malloc.h>
   47 #include <sys/mutex.h>
   48 #include <sys/random.h>
   49 
   50 #include <sys/cons.h>           /* cninit() */
   51 #include <sys/kdb.h>
   52 #include <sys/boot.h>
   53 #include <sys/reboot.h>
   54 #include <sys/queue.h>
   55 #include <sys/smp.h>
   56 #include <sys/timetc.h>
   57 
   58 #include <vm/vm.h>
   59 #include <vm/vm_param.h>
   60 #include <vm/vm_page.h>
   61 #include <vm/vm_phys.h>
   62 #include <vm/vm_dumpset.h>
   63 
   64 #include <machine/cpu.h>
   65 #include <machine/cpufunc.h>
   66 #include <machine/cpuinfo.h>
   67 #include <machine/tlb.h>
   68 #include <machine/cpuregs.h>
   69 #include <machine/frame.h>
   70 #include <machine/hwfunc.h>
   71 #include <machine/md_var.h>
   72 #include <machine/asm.h>
   73 #include <machine/pmap.h>
   74 #include <machine/trap.h>
   75 #include <machine/clock.h>
   76 #include <machine/fls64.h>
   77 #include <machine/intr_machdep.h>
   78 #include <machine/smp.h>
   79 
   80 #include <mips/nlm/hal/mips-extns.h>
   81 #include <mips/nlm/hal/haldefs.h>
   82 #include <mips/nlm/hal/iomap.h>
   83 #include <mips/nlm/hal/sys.h>
   84 #include <mips/nlm/hal/pic.h>
   85 #include <mips/nlm/hal/uart.h>
   86 #include <mips/nlm/hal/mmu.h>
   87 #include <mips/nlm/hal/bridge.h>
   88 #include <mips/nlm/hal/cpucontrol.h>
   89 #include <mips/nlm/hal/cop2.h>
   90 
   91 #include <mips/nlm/clock.h>
   92 #include <mips/nlm/interrupt.h>
   93 #include <mips/nlm/board.h>
   94 #include <mips/nlm/xlp.h>
   95 #include <mips/nlm/msgring.h>
   96 
   97 #ifdef FDT
   98 #include <dev/fdt/fdt_common.h>
   99 #include <dev/ofw/openfirm.h>
  100 #endif
  101 
  102 /* 4KB static data area to keep a copy of the bootloader environment
  103    until the dynamic kenv is set up */
  104 char boot1_env[4096];
  105 
  106 uint64_t xlp_cpu_frequency;
  107 uint64_t xlp_io_base = MIPS_PHYS_TO_DIRECT_UNCACHED(XLP_DEFAULT_IO_BASE);
  108 
  109 int xlp_ncores;
  110 int xlp_threads_per_core;
  111 uint32_t xlp_hw_thread_mask;
  112 int xlp_cpuid_to_hwtid[MAXCPU];
  113 int xlp_hwtid_to_cpuid[MAXCPU];
  114 uint64_t xlp_pic_base;
  115 
  116 static int xlp_mmuval;
  117 
  118 extern uint32_t _end;
  119 extern char XLPResetEntry[], XLPResetEntryEnd[];
  120 
  121 static void
  122 xlp_setup_core(void)
  123 {
  124         uint64_t reg;
  125 
  126         reg = nlm_mfcr(LSU_DEFEATURE);
  127         /* Enable Unaligned and L2HPE */
  128         reg |= (1 << 30) | (1 << 23);
  129         /*
  130          * Experimental: enable SUE (Speculative Unmap Enable).
  131          * Enable speculative L2 cache requests for
  132          * unmapped accesses.
  133          */
  134         reg |= (1ull << 31);
  135         /* Clear S1RCM  - A0 errata */
  136         reg &= ~0xeull;
  137         nlm_mtcr(LSU_DEFEATURE, reg);
  138 
  139         reg = nlm_mfcr(SCHED_DEFEATURE);
  140         /* Experimental: Disable BRU accepting ALU ops - A0 errata */
  141         reg |= (1 << 24);
  142         nlm_mtcr(SCHED_DEFEATURE, reg);
  143 }
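
/*
 * Editor's illustrative sketch, not part of the original file: the magic
 * constants in xlp_setup_core() above could be given symbolic names.  The
 * macro names below are assumptions made for readability; only the bit
 * positions and their meanings come from the comments in the function itself.
 */
#if 0
/* Bits set in xlp_setup_core(): unaligned access, L2HPE and SUE. */
#define LSU_DEFEATURE_SET_BITS      ((1ULL << 30) | (1ULL << 23) | (1ULL << 31))
/* S1RCM field, cleared to work around an A0 erratum. */
#define LSU_DEFEATURE_S1RCM         0xeULL
/* Keep the BRU from accepting ALU ops, another A0 erratum workaround. */
#define SCHED_DEFEATURE_NO_BRU_ALU  (1U << 24)

static void
xlp_setup_core_sketch(void)
{
        uint64_t reg;

        reg = nlm_mfcr(LSU_DEFEATURE);
        reg |= LSU_DEFEATURE_SET_BITS;
        reg &= ~LSU_DEFEATURE_S1RCM;
        nlm_mtcr(LSU_DEFEATURE, reg);

        reg = nlm_mfcr(SCHED_DEFEATURE);
        reg |= SCHED_DEFEATURE_NO_BRU_ALU;
        nlm_mtcr(SCHED_DEFEATURE, reg);
}
#endif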
  144 
  145 static void
  146 xlp_setup_mmu(void)
  147 {
  148         uint32_t pagegrain;
  149 
  150         if (nlm_threadid() == 0) {
  151                 nlm_setup_extended_pagemask(0);
  152                 nlm_large_variable_tlb_en(1);
  153                 nlm_extended_tlb_en(1);
  154                 nlm_mmu_setup(0, 0, 0);
  155         }
  156 
  157         /* Enable no-read, no-exec, large-physical-address */
  158         pagegrain = mips_rd_pagegrain();
  159         pagegrain |= (1U << 31) |       /* RIE */
  160             (1 << 30)           |       /* XIE */
  161             (1 << 29);                  /* ELPA */
  162         mips_wr_pagegrain(pagegrain);
  163 }
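
/*
 * Editor's illustrative sketch, not part of the original file: symbolic names
 * for the PageGrain bits set in xlp_setup_mmu() above.  RIE, XIE and ELPA are
 * the architectural MIPS field names quoted in the comments; the macro names
 * themselves are assumptions for readability.
 */
#if 0
#define PAGEGRAIN_RIE   (1U << 31)      /* honor the RI (no-read) page bit */
#define PAGEGRAIN_XIE   (1U << 30)      /* honor the XI (no-exec) page bit */
#define PAGEGRAIN_ELPA  (1U << 29)      /* enable large physical addresses */

static void
xlp_setup_pagegrain_sketch(void)
{
        uint32_t pg;

        pg = mips_rd_pagegrain();
        pg |= PAGEGRAIN_RIE | PAGEGRAIN_XIE | PAGEGRAIN_ELPA;
        mips_wr_pagegrain(pg);
}
#endif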
  164 
  165 static void
  166 xlp_enable_blocks(void)
  167 {
  168         uint64_t sysbase;
  169         int i;
  170 
  171         for (i = 0; i < XLP_MAX_NODES; i++) {
  172                 if (!nlm_dev_exists(XLP_IO_SYS_OFFSET(i)))
  173                         continue;
  174                 sysbase = nlm_get_sys_regbase(i);
  175                 nlm_sys_enable_block(sysbase, DFS_DEVICE_RSA);
  176         }
  177 }
  178 
  179 static void
  180 xlp_parse_mmu_options(void)
  181 {
  182         uint64_t sysbase;
  183         uint32_t cpu_map = xlp_hw_thread_mask;
  184         uint32_t core0_thr_mask, core_thr_mask, cpu_rst_mask;
  185         int i, j, k;
  186 
  187 #ifdef SMP
  188         if (cpu_map == 0)
  189                 cpu_map = 0xffffffff;
  190 #else /* Uniprocessor! */
  191         if (cpu_map == 0)
  192                 cpu_map = 0x1;
  193         else if (cpu_map != 0x1) {
  194                 printf("WARNING: Starting uniprocessor kernel on cpumask [0x%lx]!\n"
  195                     "WARNING: Other CPUs will be unused.\n", (u_long)cpu_map);
  196                 cpu_map = 0x1;
  197         }
  198 #endif
  199 
  200         xlp_ncores = 1;
  201         core0_thr_mask = cpu_map & 0xf;
  202         switch (core0_thr_mask) {
  203         case 1:
  204                 xlp_threads_per_core = 1;
  205                 xlp_mmuval = 0;
  206                 break;
  207         case 3:
  208                 xlp_threads_per_core = 2;
  209                 xlp_mmuval = 2;
  210                 break;
  211         case 0xf:
  212                 xlp_threads_per_core = 4;
  213                 xlp_mmuval = 3;
  214                 break;
  215         default:
  216                 goto unsupp;
  217         }
  218 
  219         /* Try to find the enabled cores from SYS block */
  220         sysbase = nlm_get_sys_regbase(0);
  221         cpu_rst_mask = nlm_read_sys_reg(sysbase, SYS_CPU_RESET) & 0xff;
  222 
  223         /* XLP 416 does not report this correctly, fix */
  224         if (nlm_processor_id() == CHIP_PROCESSOR_ID_XLP_416)
  225                 cpu_rst_mask = 0xe;
  226 
  227         /* Take out cores that do not exist on the chip */
  228         for (i = 1; i < XLP_MAX_CORES; i++) {
  229                 if ((cpu_rst_mask & (1 << i)) == 0)
  230                         cpu_map &= ~(0xfu << (4 * i));
  231         }
  232 
  233         /* Verify other cores' CPU masks */
  234         for (i = 1; i < XLP_MAX_CORES; i++) {
  235                 core_thr_mask = (cpu_map >> (4 * i)) & 0xf;
  236                 if (core_thr_mask == 0)
  237                         continue;
  238                 if (core_thr_mask != core0_thr_mask)
  239                         goto unsupp;
  240                 xlp_ncores++;
  241         }
  242 
  243         xlp_hw_thread_mask = cpu_map;
  244         /* Set up the hardware thread id <-> cpu id mapping */
  245         for (i = 0; i < MAXCPU; i++)
  246                 xlp_cpuid_to_hwtid[i] =
  247                     xlp_hwtid_to_cpuid[i] = -1;
  248         for (i = 0, k = 0; i < XLP_MAX_CORES; i++) {
  249                 if (((cpu_map >> (i * 4)) & 0xf) == 0)
  250                         continue;
  251                 for (j = 0; j < xlp_threads_per_core; j++) {
  252                         xlp_cpuid_to_hwtid[k] = i * 4 + j;
  253                         xlp_hwtid_to_cpuid[i * 4 + j] = k;
  254                         k++;
  255                 }
  256         }
  257 
  258         return;
  259 
  260 unsupp:
  261         printf("ERROR: Unsupported CPU mask [use 1, 2 or 4 threads per core].\n"
  262             "\tcore0 thread mask [%lx], boot cpu mask [%lx].\n",
  263             (u_long)core0_thr_mask, (u_long)cpu_map);
  264         panic("Invalid CPU mask - halting.\n");
  265         return;
  266 }
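
/*
 * Editor's illustrative sketch, not part of the original file: the mapping
 * loop in xlp_parse_mmu_options() above, run standalone for a hypothetical
 * cpu_map of 0x33 (cores 0 and 1, two threads each).  All names and constants
 * below are local to the sketch; compile it as a normal userland program to
 * see the resulting tables.
 */
#if 0
#include <stdio.h>

#define SKETCH_MAX_CORES        8
#define SKETCH_MAXCPU           32

int
main(void)
{
        unsigned int cpu_map = 0x33;    /* example mask: cores 0 and 1 */
        int threads_per_core = 2;       /* derived from core 0 mask 0x3 */
        int cpuid_to_hwtid[SKETCH_MAXCPU], hwtid_to_cpuid[SKETCH_MAXCPU];
        int i, j, k;

        for (i = 0; i < SKETCH_MAXCPU; i++)
                cpuid_to_hwtid[i] = hwtid_to_cpuid[i] = -1;
        for (i = 0, k = 0; i < SKETCH_MAX_CORES; i++) {
                if (((cpu_map >> (i * 4)) & 0xf) == 0)
                        continue;
                for (j = 0; j < threads_per_core; j++) {
                        cpuid_to_hwtid[k] = i * 4 + j;
                        hwtid_to_cpuid[i * 4 + j] = k;
                        k++;
                }
        }
        /* Prints: cpu 0 -> hwt 0, cpu 1 -> hwt 1, cpu 2 -> hwt 4, cpu 3 -> hwt 5 */
        for (i = 0; i < k; i++)
                printf("cpu %d -> hwt %d\n", i, cpuid_to_hwtid[i]);
        return (0);
}
#endif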
  267 
  268 #ifdef FDT
  269 static void
  270 xlp_bootargs_init(__register_t arg)
  271 {
  272         char    buf[2048]; /* early stack is big enough */
  273         void    *dtbp;
  274         phandle_t chosen;
  275         ihandle_t mask;
  276 
  277         dtbp = (void *)(intptr_t)arg;
  278 #if defined(FDT_DTB_STATIC)
  279         /*
  280          * In case the device tree blob was not passed as argument try
  281          * to use the statically embedded one.
  282          */
  283         if (dtbp == NULL)
  284                 dtbp = &fdt_static_dtb;
  285 #endif
  286         if (OF_install(OFW_FDT, 0) == FALSE)
  287                 while (1);
  288         if (OF_init((void *)dtbp) != 0)
  289                 while (1);
  290         OF_interpret("perform-fixup", 0);
  291 
  292         chosen = OF_finddevice("/chosen");
  293         if (OF_getprop(chosen, "cpumask", &mask, sizeof(mask)) != -1) {
  294                 xlp_hw_thread_mask = mask;
  295         }
  296 
  297         if (OF_getprop(chosen, "bootargs", buf, sizeof(buf)) != -1)
  298                 boothowto |= boot_parse_cmdline(buf);
  299 }
  300 #else
  301 /*
  302  * arg is a pointer to the environment block; the format of the block is
  303  * a=xyz\0b=pqr\0\0
  304  */
  305 static void
  306 xlp_bootargs_init(__register_t arg)
  307 {
  308         char    buf[2048]; /* early stack is big enough */
  309         char    *p, *v, *n;
  310         uint32_t mask;
  311 
  312         /*
  313          * Provide backward compatibility for passing the cpu mask as arg.
  314          */
  315         if (arg & 1) {
  316                 xlp_hw_thread_mask = arg;
  317                 return;
  318         }
  319 
  320         p = (void *)(intptr_t)arg;
  321         while (*p != '\0') {
  322                 strlcpy(buf, p, sizeof(buf));
  323                 v = buf;
  324                 n = strsep(&v, "=");
  325                 if (v == NULL)
  326                         kern_setenv(n, "1");
  327                 else
  328                         kern_setenv(n, v);
  329                 p += strlen(p) + 1;
  330         }
  331 
  332         /* The CPU mask can also be passed through the environment */
  333         if (getenv_uint("cpumask", &mask) != 0)
  334                 xlp_hw_thread_mask = mask;
  335 
  336         /* Command line arguments come via the bootargs variable */
  337         v = kern_getenv("bootargs");
  338         if (v != NULL) {
  339                 strlcpy(buf, v, sizeof(buf));
  340                 boothowto |= boot_parse_cmdline(buf);
  341                 freeenv(v);
  342         }
  343 }
  344 #endif
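
/*
 * Editor's illustrative sketch, not part of the original file: the
 * "a=xyz\0b=pqr\0\0" environment block layout walked by the non-FDT
 * xlp_bootargs_init() above.  The variable names and values are arbitrary
 * examples; compile standalone to see the traversal.
 */
#if 0
#include <stdio.h>
#include <string.h>

int
main(void)
{
        /* Two entries, each NUL-terminated; the implicit trailing NUL of the
         * string literal supplies the empty entry that ends the block. */
        static const char env[] = "cpumask=0xf\0bootargs=-v\0";
        const char *p;

        for (p = env; *p != '\0'; p += strlen(p) + 1)
                printf("entry: %s\n", p);  /* "cpumask=0xf", then "bootargs=-v" */
        return (0);
}
#endif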
  345 
  346 static void
  347 mips_init(void)
  348 {
  349         init_param1();
  350         init_param2(physmem);
  351 
  352         mips_cpu_init();
  353         cpuinfo.cache_coherent_dma = TRUE;
  354         pmap_bootstrap();
  355         mips_proc0_init();
  356         mutex_init();
  357 #ifdef DDB
  358         kdb_init();
  359         if (boothowto & RB_KDB) {
  360                 kdb_enter("Boot flags requested debugger", NULL);
  361         }
  362 #endif
  363 }
  364 
  365 unsigned int
  366 platform_get_timecount(struct timecounter *tc __unused)
  367 {
  368         uint64_t count = nlm_pic_read_timer(xlp_pic_base, PIC_CLOCK_TIMER);
  369 
  370         return (unsigned int)~count;
  371 }
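
/*
 * Editor's note and sketch, not part of the original file: the PIC timer
 * above counts down from ~0, while the timecounter framework expects a value
 * that increases and wraps modulo 2^32, which is why platform_get_timecount()
 * returns the one's complement of the hardware count.
 */
#if 0
static unsigned int
downcount_to_timecount(uint64_t down)
{
        /* When 'down' decreases by N ticks, the returned value grows by N. */
        return ((unsigned int)~down);
}
#endif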
  372 
  373 static void
  374 xlp_pic_init(void)
  375 {
  376         struct timecounter pic_timecounter = {
  377                 platform_get_timecount, /* get_timecount */
  378                 0,                      /* no poll_pps */
  379                 ~0U,                    /* counter_mask */
  380                 XLP_IO_CLK,            /* frequency */
  381                 "XLRPIC",               /* name */
  382                 2000,                   /* quality (adjusted in code) */
  383         };
  384         int i;
  385         int maxirt;
  386 
  387         xlp_pic_base = nlm_get_pic_regbase(0);  /* TODO: Add other nodes */
  388         maxirt = nlm_read_reg(nlm_get_pic_pcibase(nlm_nodeid()),
  389             XLP_PCI_DEVINFO_REG0);
  390         printf("Initializing PIC...@%jx %d IRTs\n", (uintmax_t)xlp_pic_base,
  391             maxirt);
  392         /* Bind all PIC irqs to cpu 0 */
  393         for (i = 0; i < maxirt; i++)
  394                 nlm_pic_write_irt(xlp_pic_base, i, 0, 0, 1, 0,
  395                     1, 0, 0x1);
  396 
  397         nlm_pic_set_timer(xlp_pic_base, PIC_CLOCK_TIMER, ~0ULL, 0, 0);
  398         platform_timecounter = &pic_timecounter;
  399 }
  400 
  401 #if defined(__mips_n32) || defined(__mips_n64) /* PHYSADDR_64_BIT */
  402 #ifdef XLP_SIM
  403 #define XLP_MEM_LIM     0x200000000ULL
  404 #else
  405 #define XLP_MEM_LIM     0x10000000000ULL
  406 #endif
  407 #else
  408 #define XLP_MEM_LIM     0xfffff000UL
  409 #endif
  410 static vm_paddr_t xlp_mem_excl[] = {    /* pairs must be in ascending order */
  411         0,          0,          /* for kernel image region, see xlp_mem_init */
  412         0x0c000000, 0x14000000, /* u-boot area, CMS queues and other stuff */
  413         0x1e000000, 0x1e200000, /* POE buffers */
  414         0x1fc00000, 0x1fd00000, /* reset vector */
  415 };
  416 
  417 static int
  418 mem_exclude_add(vm_paddr_t *avail, vm_paddr_t mstart, vm_paddr_t mend)
  419 {
  420         int i, pos;
  421 
  422         pos = 0;
  423         for (i = 0; i < nitems(xlp_mem_excl); i += 2) {
  424                 if (mstart > xlp_mem_excl[i + 1])
  425                         continue;
  426                 if (mstart < xlp_mem_excl[i]) {
  427                         avail[pos++] = mstart;
  428                         if (mend < xlp_mem_excl[i])
  429                                 avail[pos++] = mend;
  430                         else
  431                                 avail[pos++] = xlp_mem_excl[i];
  432                 }
  433                 mstart = xlp_mem_excl[i + 1];
  434                 if (mend <= mstart)
  435                         break;
  436         }
  437         if (mstart < mend) {
  438                 avail[pos++] = mstart;
  439                 avail[pos++] = mend;
  440         }
  441         return (pos);
  442 }
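
/*
 * Editor's illustrative sketch, not part of the original file: how the
 * splitting loop in mem_exclude_add() above carves one DRAM range around the
 * exclusion table.  The local table assumes an example kernel image ending at
 * 0x02000000 and keeps the pairs in ascending order, which the loop relies
 * on; all names and addresses here are examples only.
 */
#if 0
#include <stdio.h>

typedef unsigned long long paddr_t_sketch;

static paddr_t_sketch excl[] = {
        0x00000000, 0x02000000, /* kernel image (example end address) */
        0x0c000000, 0x14000000, /* u-boot area */
        0x1e000000, 0x1e200000, /* POE buffers */
        0x1fc00000, 0x1fd00000, /* reset vector */
};

/* Same splitting logic as mem_exclude_add() above. */
static int
exclude_add(paddr_t_sketch *avail, paddr_t_sketch mstart, paddr_t_sketch mend)
{
        int i, pos = 0;

        for (i = 0; i < (int)(sizeof(excl) / sizeof(excl[0])); i += 2) {
                if (mstart > excl[i + 1])
                        continue;
                if (mstart < excl[i]) {
                        avail[pos++] = mstart;
                        avail[pos++] = mend < excl[i] ? mend : excl[i];
                }
                mstart = excl[i + 1];
                if (mend <= mstart)
                        break;
        }
        if (mstart < mend) {
                avail[pos++] = mstart;
                avail[pos++] = mend;
        }
        return (pos);
}

int
main(void)
{
        paddr_t_sketch avail[16];
        int i, n;

        n = exclude_add(avail, 0x10000000ULL, 0x20000000ULL);
        /* Prints three usable ranges: 0x14000000-0x1e000000,
         * 0x1e200000-0x1fc00000 and 0x1fd00000-0x20000000. */
        for (i = 0; i < n; i += 2)
                printf("usable: %#llx - %#llx\n", avail[i], avail[i + 1]);
        return (0);
}
#endif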
  443 
  444 static void
  445 xlp_mem_init(void)
  446 {
  447         vm_paddr_t physsz, tmp;
  448         uint64_t bridgebase, base, lim, val;
  449         int i, j, k, n;
  450 
  451         /* update kernel image area in exclude regions */
  452         tmp = (vm_paddr_t)MIPS_KSEG0_TO_PHYS(&_end);
  453         tmp = round_page(tmp) + 0x20000; /* page-align and leave some slack */
  454         xlp_mem_excl[1] = tmp;
  455 
  456         printf("Memory (from DRAM BARs):\n");
  457         bridgebase = nlm_get_bridge_regbase(0); /* TODO: Add other nodes */
  458         physsz = 0;
  459         for (i = 0, j = 0; i < 8; i++) {
  460                 val = nlm_read_bridge_reg(bridgebase, BRIDGE_DRAM_BAR(i));
  461                 val = (val >>  12) & 0xfffff;
  462                 base = val << 20;
  463                 val = nlm_read_bridge_reg(bridgebase, BRIDGE_DRAM_LIMIT(i));
  464                 val = (val >>  12) & 0xfffff;
  465                 if (val == 0)   /* BAR not enabled */
  466                         continue;
  467                 lim = (val + 1) << 20;
  468                 printf("  BAR %d: %#jx - %#jx : ", i, (intmax_t)base,
  469                     (intmax_t)lim);
  470 
  471                 if (lim <= base) {
  472                         printf("\tskipped - malformed %#jx -> %#jx\n",
  473                             (intmax_t)base, (intmax_t)lim);
  474                         continue;
  475                 } else if (base >= XLP_MEM_LIM) {
  476                         printf(" skipped - outside usable limit %#jx.\n",
  477                             (intmax_t)XLP_MEM_LIM);
  478                         continue;
  479                 } else if (lim >= XLP_MEM_LIM) {
  480                         lim = XLP_MEM_LIM;
  481                         printf(" truncated to %#jx.\n", (intmax_t)XLP_MEM_LIM);
  482                 } else
  483                         printf(" usable\n");
  484 
  485                 /* exclude unusable regions from BAR and add rest */
  486                 n = mem_exclude_add(&phys_avail[j], base, lim);
  487                 for (k = j; k < j + n; k += 2) {
  488                         physsz += phys_avail[k + 1] - phys_avail[k];
  489                         printf("\tMem[%d]: %#jx - %#jx\n", k/2,
  490                             (intmax_t)phys_avail[k], (intmax_t)phys_avail[k+1]);
  491                 }
  492                 j = k;
  493         }
  494 
  495         /* Terminate the list with a zero entry */
  496         phys_avail[j] = phys_avail[j + 1] = 0;
  497 
  498         /* copy phys_avail to dump_avail */
  499         for (i = 0; i <= j + 1; i++)
  500                 dump_avail[i] = phys_avail[i];
  501 
  502         realmem = physmem = btoc(physsz);
  503 }
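
/*
 * Editor's illustrative sketch, not part of the original file: the DRAM BAR
 * decode used in xlp_mem_init() above.  The 20-bit field in bits 31..12 of
 * the BAR and LIMIT registers encodes an address in 1 MiB units, and the
 * limit field is inclusive, hence the "+ 1".  The register values below are
 * made-up examples.
 */
#if 0
#include <stdio.h>

static unsigned long long
bar_to_addr(unsigned long long reg)
{
        return (((reg >> 12) & 0xfffff) << 20);
}

int
main(void)
{
        unsigned long long bar = 0x00000000ULL;   /* base field 0 */
        unsigned long long limit = 0x001ff000ULL; /* limit field 0x1ff */

        /* Prints: DRAM 0 - 0x20000000 (512 MiB starting at 0) */
        printf("DRAM %#llx - %#llx\n", bar_to_addr(bar),
            bar_to_addr(limit) + (1ULL << 20));
        return (0);
}
#endif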
  504 
  505 void
  506 platform_start(__register_t a0 __unused,
  507     __register_t a1 __unused,
  508     __register_t a2 __unused,
  509     __register_t a3 __unused)
  510 {
  511 
  512         /* Initialize per-CPU data for the boot CPU */
  513         mips_pcpu0_init();
  514 
  515         /* initialize console so that we have printf */
  516         boothowto |= (RB_SERIAL | RB_MULTIPLE); /* Use multiple consoles */
  517 
  518         init_static_kenv(boot1_env, sizeof(boot1_env));
  519         xlp_bootargs_init(a0);
  520 
  521         /* The clock rate is used by DELAY(), so initialize it here */
  522         xlp_cpu_frequency = xlp_get_cpu_frequency(0, 0);
  523         cpu_clock = xlp_cpu_frequency / 1000000;
  524         mips_timer_early_init(xlp_cpu_frequency);
  525 
  526         /* Initialize the console */
  527         cninit();
  528 
  529         /* Early core init and fixes for errata */
  530         xlp_setup_core();
  531 
  532         xlp_parse_mmu_options();
  533         xlp_mem_init();
  534 
  535         bcopy(XLPResetEntry, (void *)MIPS_RESET_EXC_VEC,
  536               XLPResetEntryEnd - XLPResetEntry);
  537 #ifdef SMP
  538         /*
  539          * We will enable the other threads in core 0 here
  540          * so that the TLB and cache info is correct when
  541          * mips_init runs
  542          */
  543         xlp_enable_threads(xlp_mmuval);
  544 #endif
  545         /* MMU setup for the boot core */
  546         xlp_setup_mmu();
  547 
  548         xlp_enable_blocks();
  549 
  550         /* Read/Guess/setup board information */
  551         nlm_board_info_setup();
  552 
  553         /* MIPS generic init */
  554         mips_init();
  555 
  556         /*
  557          * XLP-specific post-initialization:
  558          * set up the remaining on-chip blocks.
  559          */
  560         xlp_pic_init();
  561 
  562         mips_timer_init_params(xlp_cpu_frequency, 0);
  563 }
  564 
  565 void
  566 platform_cpu_init(void)
  567 {
  568 }
  569 
  570 void
  571 platform_reset(void)
  572 {
  573         uint64_t sysbase = nlm_get_sys_regbase(0);
  574 
  575         nlm_write_sys_reg(sysbase, SYS_CHIP_RESET, 1);
  576         for( ; ; )
  577                 __asm __volatile("wait");
  578 }
  579 
  580 #ifdef SMP
  581 /*
  582  * XLP threads are started simultaneously when we enable threads; this array
  583  * ensures that the threads stay blocked in platform_init_ap() until they
  584  * are ready to proceed to smp_init_secondary().
  585  */
  586 static volatile int thr_unblock[4];
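
/*
 * Editor's illustrative sketch, not part of the original file: the release /
 * spin-wait handshake implemented by platform_start_ap() and
 * platform_init_ap() below, condensed into its two sides.  The helper names
 * are assumptions.
 */
#if 0
static void
thr_release(volatile int *flag)
{
        *flag = 1;              /* boot cpu side: let the AP thread proceed */
}

static void
thr_wait(volatile int *flag)
{
        while (*flag == 0)      /* AP side: spin until released */
                ;
        *flag = 0;              /* consume the token */
}
#endif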
  587 
  588 int
  589 platform_start_ap(int cpuid)
  590 {
  591         uint32_t coremask, val;
  592         uint64_t sysbase = nlm_get_sys_regbase(0);
  593         int hwtid = xlp_cpuid_to_hwtid[cpuid];
  594         int core, thr;
  595 
  596         core = hwtid / 4;
  597         thr = hwtid % 4;
  598         if (thr == 0) {
  599                 /* First thread in core, do core wake up */
  600                 coremask = 1u << core;
  601 
  602                 /* Enable core clock */
  603                 val = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL);
  604                 val &= ~coremask;
  605                 nlm_write_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL, val);
  606 
  607                 /* Remove CPU Reset */
  608                 val = nlm_read_sys_reg(sysbase, SYS_CPU_RESET);
  609                 val &=  ~coremask & 0xff;
  610                 nlm_write_sys_reg(sysbase, SYS_CPU_RESET, val);
  611 
  612                 if (bootverbose)
  613                         printf("Waking up core %d ...", core);
  614 
  615                 /* Poll for CPU to mark itself coherent */
  616                 do {
  617                         val = nlm_read_sys_reg(sysbase, SYS_CPU_NONCOHERENT_MODE);
  618                 } while ((val & coremask) != 0);
  619                 if (bootverbose)
  620                         printf("Done\n");
  621         } else {
  622                 /* Otherwise release the thread stuck in platform_init_ap() */
  623                 thr_unblock[thr] = 1;
  624         }
  625 
  626         return (0);
  627 }
  628 
  629 void
  630 platform_init_ap(int cpuid)
  631 {
  632         uint32_t stat;
  633         int thr;
  634 
  635         /* The first thread has to set up the MMU and enable the other threads */
  636         thr = nlm_threadid();
  637         if (thr == 0) {
  638                 xlp_setup_core();
  639                 xlp_enable_threads(xlp_mmuval);
  640         } else {
  641                 /*
  642                  * FIXME: this busy wait eats too many cycles, especially
  643                  * on core 0 during bootup.
  644                  */
  645                 while (thr_unblock[thr] == 0)
  646                         __asm__ __volatile__ ("nop;nop;nop;nop");
  647                 thr_unblock[thr] = 0;
  648         }
  649 
  650         xlp_setup_mmu();
  651         stat = mips_rd_status();
  652         KASSERT((stat & MIPS_SR_INT_IE) == 0,
  653             ("Interrupts enabled in %s!", __func__));
  654         stat |= MIPS_SR_COP_2_BIT | MIPS_SR_COP_0_BIT;
  655         mips_wr_status(stat);
  656 
  657         nlm_write_c0_eimr(0ull);
  658         xlp_enable_irq(IRQ_IPI);
  659         xlp_enable_irq(IRQ_TIMER);
  660         xlp_enable_irq(IRQ_MSGRING);
  661 
  662         return;
  663 }
  664 
  665 int
  666 platform_ipi_hardintr_num(void)
  667 {
  668 
  669         return (IRQ_IPI);
  670 }
  671 
  672 int
  673 platform_ipi_softintr_num(void)
  674 {
  675 
  676         return (-1);
  677 }
  678 
  679 void
  680 platform_ipi_send(int cpuid)
  681 {
  682 
  683         nlm_pic_send_ipi(xlp_pic_base, xlp_cpuid_to_hwtid[cpuid],
  684             platform_ipi_hardintr_num(), 0);
  685 }
  686 
  687 void
  688 platform_ipi_clear(void)
  689 {
  690 }
  691 
  692 int
  693 platform_processor_id(void)
  694 {
  695 
  696         return (xlp_hwtid_to_cpuid[nlm_cpuid()]);
  697 }
  698 
  699 void
  700 platform_cpu_mask(cpuset_t *mask)
  701 {
  702         int i, s;
  703 
  704         CPU_ZERO(mask);
  705         s = xlp_ncores * xlp_threads_per_core;
  706         for (i = 0; i < s; i++)
  707                 CPU_SET(i, mask);
  708 }
  709 
  710 struct cpu_group *
  711 platform_smp_topo(void)
  712 {
  713 
  714         return (smp_topo_2level(CG_SHARE_L2, xlp_ncores, CG_SHARE_L1,
  715                 xlp_threads_per_core, CG_FLAG_THREAD));
  716 }
  717 #endif
