1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights
5 * reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are
9 * met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 *
30 * NETLOGIC_BSD */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include "opt_ddb.h"
36 #include "opt_platform.h"
37
38 #include <sys/param.h>
39 #include <sys/bus.h>
40 #include <sys/conf.h>
41 #include <sys/rtprio.h>
42 #include <sys/systm.h>
43 #include <sys/interrupt.h>
44 #include <sys/limits.h>
45 #include <sys/lock.h>
46 #include <sys/malloc.h>
47 #include <sys/mutex.h>
48 #include <sys/random.h>
49
50 #include <sys/cons.h> /* cinit() */
51 #include <sys/kdb.h>
52 #include <sys/boot.h>
53 #include <sys/reboot.h>
54 #include <sys/queue.h>
55 #include <sys/smp.h>
56 #include <sys/timetc.h>
57
58 #include <vm/vm.h>
59 #include <vm/vm_page.h>
60
61 #include <machine/cpu.h>
62 #include <machine/cpufunc.h>
63 #include <machine/cpuinfo.h>
64 #include <machine/tlb.h>
65 #include <machine/cpuregs.h>
66 #include <machine/frame.h>
67 #include <machine/hwfunc.h>
68 #include <machine/md_var.h>
69 #include <machine/asm.h>
70 #include <machine/pmap.h>
71 #include <machine/trap.h>
72 #include <machine/clock.h>
73 #include <machine/fls64.h>
74 #include <machine/intr_machdep.h>
75 #include <machine/smp.h>
76
77 #include <mips/nlm/hal/mips-extns.h>
78 #include <mips/nlm/hal/haldefs.h>
79 #include <mips/nlm/hal/iomap.h>
80 #include <mips/nlm/hal/sys.h>
81 #include <mips/nlm/hal/pic.h>
82 #include <mips/nlm/hal/uart.h>
83 #include <mips/nlm/hal/mmu.h>
84 #include <mips/nlm/hal/bridge.h>
85 #include <mips/nlm/hal/cpucontrol.h>
86 #include <mips/nlm/hal/cop2.h>
87
88 #include <mips/nlm/clock.h>
89 #include <mips/nlm/interrupt.h>
90 #include <mips/nlm/board.h>
91 #include <mips/nlm/xlp.h>
92 #include <mips/nlm/msgring.h>
93
94 #ifdef FDT
95 #include <dev/fdt/fdt_common.h>
96 #include <dev/ofw/openfirm.h>
97 #endif
98
/* 4KB static data area to keep a copy of the bootloader env until
   the dynamic kenv is set up */
char boot1_env[4096];

uint64_t xlp_cpu_frequency;	/* CPU core clock, in Hz (set in platform_start) */
uint64_t xlp_io_base = MIPS_PHYS_TO_DIRECT_UNCACHED(XLP_DEFAULT_IO_BASE);

int xlp_ncores;			/* number of enabled cores */
int xlp_threads_per_core;	/* enabled threads per core: 1, 2 or 4 */
uint32_t xlp_hw_thread_mask;	/* bitmask of hardware threads to bring up */
int xlp_cpuid_to_hwtid[MAXCPU];	/* FreeBSD cpuid -> hardware thread id */
int xlp_hwtid_to_cpuid[MAXCPU];	/* hardware thread id -> FreeBSD cpuid */
uint64_t xlp_pic_base;		/* PIC register base (set in xlp_pic_init) */

static int xlp_mmuval;		/* MMU thread-mode value derived from thread mask */

extern uint32_t _end;		/* end of kernel image (linker-provided) */
extern char XLPResetEntry[], XLPResetEntryEnd[];
117
/*
 * Early per-core setup: adjust the LSU and scheduler "defeature"
 * control registers to enable optional features and apply A0-stepping
 * errata workarounds.  Called on the boot CPU from platform_start()
 * and on thread 0 of each AP core from platform_init_ap().
 */
static void
xlp_setup_core(void)
{
	uint64_t reg;

	reg = nlm_mfcr(LSU_DEFEATURE);
	/* Enable Unaligned and L2HPE */
	reg |= (1 << 30) | (1 << 23);
	/*
	 * Experimental : Enable SUE
	 * Speculative Unmap Enable. Enable speculative L2 cache request for
	 * unmapped access.
	 */
	reg |= (1ull << 31);
	/* Clear S1RCM - A0 errata */
	reg &= ~0xeull;
	nlm_mtcr(LSU_DEFEATURE, reg);

	reg = nlm_mfcr(SCHED_DEFEATURE);
	/* Experimental: Disable BRU accepting ALU ops - A0 errata */
	reg |= (1 << 24);
	nlm_mtcr(SCHED_DEFEATURE, reg);
}
141
/*
 * Per-thread MMU setup.  Thread 0 of each core additionally programs
 * the core-wide TLB configuration (extended pagemask, large variable
 * TLB, extended TLB, MMU partitioning).  Every thread then enables
 * RIE/XIE/ELPA in its PageGrain register.
 */
static void
xlp_setup_mmu(void)
{
	uint32_t pagegrain;

	if (nlm_threadid() == 0) {
		nlm_setup_extended_pagemask(0);
		nlm_large_variable_tlb_en(1);
		nlm_extended_tlb_en(1);
		nlm_mmu_setup(0, 0, 0);
	}

	/* Enable no-read, no-exec, large-physical-address */
	pagegrain = mips_rd_pagegrain();
	pagegrain |= (1U << 31) |	/* RIE */
	    (1 << 30) |			/* XIE */
	    (1 << 29);			/* ELPA */
	mips_wr_pagegrain(pagegrain);
}
161
162 static void
163 xlp_enable_blocks(void)
164 {
165 uint64_t sysbase;
166 int i;
167
168 for (i = 0; i < XLP_MAX_NODES; i++) {
169 if (!nlm_dev_exists(XLP_IO_SYS_OFFSET(i)))
170 continue;
171 sysbase = nlm_get_sys_regbase(i);
172 nlm_sys_enable_block(sysbase, DFS_DEVICE_RSA);
173 }
174 }
175
/*
 * Parse the boot CPU mask (xlp_hw_thread_mask) and derive the number
 * of cores, the threads-per-core count and the MMU partitioning value
 * (xlp_mmuval).  Also builds the cpuid <-> hardware-thread-id
 * translation tables.  Panics if the mask does not use the same
 * 1-, 2- or 4-thread pattern on every enabled core.
 */
static void
xlp_parse_mmu_options(void)
{
	uint64_t sysbase;
	uint32_t cpu_map = xlp_hw_thread_mask;
	uint32_t core0_thr_mask, core_thr_mask, cpu_rst_mask;
	int i, j, k;

#ifdef SMP
	if (cpu_map == 0)
		cpu_map = 0xffffffff;
#else /* Uniprocessor! */
	if (cpu_map == 0)
		cpu_map = 0x1;
	else if (cpu_map != 0x1) {
		printf("WARNING: Starting uniprocessor kernel on cpumask [0x%lx]!\n"
		    "WARNING: Other CPUs will be unused.\n", (u_long)cpu_map);
		cpu_map = 0x1;
	}
#endif

	xlp_ncores = 1;
	core0_thr_mask = cpu_map & 0xf;	/* each core owns 4 mask bits */
	switch (core0_thr_mask) {
	case 1:
		xlp_threads_per_core = 1;
		xlp_mmuval = 0;
		break;
	case 3:
		xlp_threads_per_core = 2;
		xlp_mmuval = 2;
		break;
	case 0xf:
		xlp_threads_per_core = 4;
		xlp_mmuval = 3;
		break;
	default:
		goto unsupp;
	}

	/* Try to find the enabled cores from SYS block */
	sysbase = nlm_get_sys_regbase(0);
	cpu_rst_mask = nlm_read_sys_reg(sysbase, SYS_CPU_RESET) & 0xff;

	/* XLP 416 does not report this correctly, fix */
	if (nlm_processor_id() == CHIP_PROCESSOR_ID_XLP_416)
		cpu_rst_mask = 0xe;

	/* Take out cores which do not exist on chip */
	for (i = 1; i < XLP_MAX_CORES; i++) {
		if ((cpu_rst_mask & (1 << i)) == 0)
			cpu_map &= ~(0xfu << (4 * i));
	}

	/*
	 * Verify other cores' CPU masks: every enabled core must use
	 * the same thread pattern as core 0.
	 */
	for (i = 1; i < XLP_MAX_CORES; i++) {
		core_thr_mask = (cpu_map >> (4 * i)) & 0xf;
		if (core_thr_mask == 0)
			continue;
		if (core_thr_mask != core0_thr_mask)
			goto unsupp;
		xlp_ncores++;
	}

	xlp_hw_thread_mask = cpu_map;
	/* setup hardware processor id to cpu id mapping */
	for (i = 0; i < MAXCPU; i++)
		xlp_cpuid_to_hwtid[i] =
		    xlp_hwtid_to_cpuid[i] = -1;
	for (i = 0, k = 0; i < XLP_MAX_CORES; i++) {
		if (((cpu_map >> (i * 4)) & 0xf) == 0)
			continue;
		for (j = 0; j < xlp_threads_per_core; j++) {
			xlp_cpuid_to_hwtid[k] = i * 4 + j;
			xlp_hwtid_to_cpuid[i * 4 + j] = k;
			k++;
		}
	}

	return;

unsupp:
	printf("ERROR : Unsupported CPU mask [use 1,2 or 4 threads per core].\n"
	    "\tcore0 thread mask [%lx], boot cpu mask [%lx].\n",
	    (u_long)core0_thr_mask, (u_long)cpu_map);
	panic("Invalid CPU mask - halting.\n");
	/* NOTREACHED */
	return;
}
264
265 #ifdef FDT
/*
 * FDT boot: install and initialize the flattened device tree passed
 * in 'arg' (falling back to a statically embedded blob when built
 * with FDT_DTB_STATIC), then pick up the "cpumask" and "bootargs"
 * properties from the /chosen node.
 */
static void
xlp_bootargs_init(__register_t arg)
{
	char buf[2048];	/* early stack is big enough */
	void *dtbp;
	phandle_t chosen;
	ihandle_t mask;

	dtbp = (void *)(intptr_t)arg;
#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not passed as argument try
	 * to use the statically embedded one.
	 */
	if (dtbp == NULL)
		dtbp = &fdt_static_dtb;
#endif
	/* No console yet at this point, so just hang on failure */
	if (OF_install(OFW_FDT, 0) == FALSE)
		while (1);
	if (OF_init((void *)dtbp) != 0)
		while (1);
	OF_interpret("perform-fixup", 0);

	chosen = OF_finddevice("/chosen");
	if (OF_getprop(chosen, "cpumask", &mask, sizeof(mask)) != -1) {
		xlp_hw_thread_mask = mask;
	}

	if (OF_getprop(chosen, "bootargs", buf, sizeof(buf)) != -1)
		boothowto |= boot_parse_cmdline(buf);
}
297 #else
298 /*
299 * arg is a pointer to the environment block, the format of the block is
300 * a=xyz\0b=pqr\0\0
301 */
/*
 * Non-FDT boot: copy the bootloader-provided environment block into
 * the kernel environment, then read "cpumask" and "bootargs" from it.
 */
static void
xlp_bootargs_init(__register_t arg)
{
	char buf[2048];	/* early stack is big enough */
	char *p, *v, *n;
	uint32_t mask;

	/*
	 * provide backward compat for passing cpu mask as arg
	 * (an odd value cannot be a valid env-block pointer)
	 */
	if (arg & 1) {
		xlp_hw_thread_mask = arg;
		return;
	}

	/* copy each NUL-terminated "a=b" pair into the kernel env */
	p = (void *)(intptr_t)arg;
	while (*p != '\0') {
		strlcpy(buf, p, sizeof(buf));
		v = buf;
		n = strsep(&v, "=");
		if (v == NULL)
			kern_setenv(n, "1");	/* bare name: default to "1" */
		else
			kern_setenv(n, v);
		p += strlen(p) + 1;
	}

	/* CPU mask can be passed thru env */
	if (getenv_uint("cpumask", &mask) != 0)
		xlp_hw_thread_mask = mask;

	/* command line argument */
	v = kern_getenv("bootargs");
	if (v != NULL) {
		strlcpy(buf, v, sizeof(buf));
		boothowto |= boot_parse_cmdline(buf);
		freeenv(v);
	}
}
341 #endif
342
/*
 * Generic MIPS kernel bring-up: parameter tunables, pmap bootstrap,
 * proc0 and mutex initialization.  Call order matters here.  Drops
 * into the debugger when RB_KDB was requested (DDB kernels only).
 */
static void
mips_init(void)
{
	init_param1();
	init_param2(physmem);

	mips_cpu_init();
	cpuinfo.cache_coherent_dma = TRUE;	/* DMA is cache-coherent on XLP */
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
#ifdef DDB
	kdb_init();
	if (boothowto & RB_KDB) {
		kdb_enter("Boot flags requested debugger", NULL);
	}
#endif
}
361
362 unsigned int
363 platform_get_timecount(struct timecounter *tc __unused)
364 {
365 uint64_t count = nlm_pic_read_timer(xlp_pic_base, PIC_CLOCK_TIMER);
366
367 return (unsigned int)~count;
368 }
369
370 static void
371 xlp_pic_init(void)
372 {
373 struct timecounter pic_timecounter = {
374 platform_get_timecount, /* get_timecount */
375 0, /* no poll_pps */
376 ~0U, /* counter_mask */
377 XLP_IO_CLK, /* frequency */
378 "XLRPIC", /* name */
379 2000, /* quality (adjusted in code) */
380 };
381 int i;
382 int maxirt;
383
384 xlp_pic_base = nlm_get_pic_regbase(0); /* TOOD: Add other nodes */
385 maxirt = nlm_read_reg(nlm_get_pic_pcibase(nlm_nodeid()),
386 XLP_PCI_DEVINFO_REG0);
387 printf("Initializing PIC...@%jx %d IRTs\n", (uintmax_t)xlp_pic_base,
388 maxirt);
389 /* Bind all PIC irqs to cpu 0 */
390 for (i = 0; i < maxirt; i++)
391 nlm_pic_write_irt(xlp_pic_base, i, 0, 0, 1, 0,
392 1, 0, 0x1);
393
394 nlm_pic_set_timer(xlp_pic_base, PIC_CLOCK_TIMER, ~0ULL, 0, 0);
395 platform_timecounter = &pic_timecounter;
396 }
397
398 #if defined(__mips_n32) || defined(__mips_n64) /* PHYSADDR_64_BIT */
399 #ifdef XLP_SIM
400 #define XLP_MEM_LIM 0x200000000ULL
401 #else
402 #define XLP_MEM_LIM 0x10000000000ULL
403 #endif
404 #else
405 #define XLP_MEM_LIM 0xfffff000UL
406 #endif
407 static vm_paddr_t xlp_mem_excl[] = {
408 0, 0, /* for kernel image region, see xlp_mem_init */
409 0x0c000000, 0x14000000, /* uboot area, cms queue and other stuff */
410 0x1fc00000, 0x1fd00000, /* reset vec */
411 0x1e000000, 0x1e200000, /* poe buffers */
412 };
413
/*
 * Clip the memory range [mstart, mend) against the exclusion table
 * and append the remaining usable sub-ranges to 'avail' as
 * start/end pairs.  Returns the number of entries written (always
 * even).  Precondition: xlp_mem_excl[] must be sorted by address and
 * non-overlapping - the walk advances mstart monotonically and stops
 * early once the range is exhausted.
 */
static int
mem_exclude_add(vm_paddr_t *avail, vm_paddr_t mstart, vm_paddr_t mend)
{
	int i, pos;

	pos = 0;
	for (i = 0; i < nitems(xlp_mem_excl); i += 2) {
		/* range begins past this excluded region - nothing to clip */
		if (mstart > xlp_mem_excl[i + 1])
			continue;
		if (mstart < xlp_mem_excl[i]) {
			/* emit the usable piece before the excluded region */
			avail[pos++] = mstart;
			if (mend < xlp_mem_excl[i])
				avail[pos++] = mend;
			else
				avail[pos++] = xlp_mem_excl[i];
		}
		/* continue scanning from the end of the excluded region */
		mstart = xlp_mem_excl[i + 1];
		if (mend <= mstart)
			break;
	}
	/* tail of the range past the last relevant exclusion */
	if (mstart < mend) {
		avail[pos++] = mstart;
		avail[pos++] = mend;
	}
	return (pos);
}
440
/*
 * Discover physical memory from the bridge DRAM BARs, clip out the
 * excluded regions (kernel image, firmware areas), and populate
 * phys_avail[]/dump_avail[] plus the physmem/realmem counters.
 */
static void
xlp_mem_init(void)
{
	vm_paddr_t physsz, tmp;
	uint64_t bridgebase, base, lim, val;
	int i, j, k, n;

	/* update kernel image area in exclude regions */
	tmp = (vm_paddr_t)MIPS_KSEG0_TO_PHYS(&_end);
	tmp = round_page(tmp) + 0x20000; /* round up */
	xlp_mem_excl[1] = tmp;

	printf("Memory (from DRAM BARs):\n");
	bridgebase = nlm_get_bridge_regbase(0); /* TODO: Add other nodes */
	physsz = 0;
	for (i = 0, j = 0; i < 8; i++) {
		/* BAR/LIMIT registers carry the address in 1MB units */
		val = nlm_read_bridge_reg(bridgebase, BRIDGE_DRAM_BAR(i));
		val = (val >> 12) & 0xfffff;
		base = val << 20;
		val = nlm_read_bridge_reg(bridgebase, BRIDGE_DRAM_LIMIT(i));
		val = (val >> 12) & 0xfffff;
		if (val == 0)	/* BAR not enabled */
			continue;
		lim = (val + 1) << 20;	/* limit field is inclusive */
		printf(" BAR %d: %#jx - %#jx : ", i, (intmax_t)base,
		    (intmax_t)lim);

		if (lim <= base) {
			printf("\tskipped - malformed %#jx -> %#jx\n",
			    (intmax_t)base, (intmax_t)lim);
			continue;
		} else if (base >= XLP_MEM_LIM) {
			printf(" skipped - outside usable limit %#jx.\n",
			    (intmax_t)XLP_MEM_LIM);
			continue;
		} else if (lim >= XLP_MEM_LIM) {
			lim = XLP_MEM_LIM;
			printf(" truncated to %#jx.\n", (intmax_t)XLP_MEM_LIM);
		} else
			printf(" usable\n");

		/* exclude unusable regions from BAR and add rest */
		n = mem_exclude_add(&phys_avail[j], base, lim);
		for (k = j; k < j + n; k += 2) {
			physsz += phys_avail[k + 1] - phys_avail[k];
			printf("\tMem[%d]: %#jx - %#jx\n", k/2,
			    (intmax_t)phys_avail[k], (intmax_t)phys_avail[k+1]);
		}
		j = k;
	}

	/* setup final entry with 0 */
	phys_avail[j] = phys_avail[j + 1] = 0;

	/* copy phys_avail to dump_avail */
	for (i = 0; i <= j + 1; i++)
		dump_avail[i] = phys_avail[i];

	realmem = physmem = btoc(physsz);
}
501
/*
 * Machine-dependent kernel entry point for the boot CPU.  Brings up
 * pcpu data, the console, CPU/MMU configuration and the physical
 * memory map, then hands off to the generic MIPS initialization and
 * finally the PIC/timer setup.
 */
void
platform_start(__register_t a0 __unused,
    __register_t a1 __unused,
    __register_t a2 __unused,
    __register_t a3 __unused)
{

	/* Initialize pcpu stuff */
	mips_pcpu0_init();

	/* initialize console so that we have printf */
	boothowto |= (RB_SERIAL | RB_MULTIPLE);	/* Use multiple consoles */

	init_static_kenv(boot1_env, sizeof(boot1_env));
	xlp_bootargs_init(a0);	/* a0 carries env block / dtb / cpu mask */

	/* clockrate used by delay, so initialize it here */
	xlp_cpu_frequency = xlp_get_cpu_frequency(0, 0);
	cpu_clock = xlp_cpu_frequency / 1000000;
	mips_timer_early_init(xlp_cpu_frequency);

	/* Init console please */
	cninit();

	/* Early core init and fixes for errata */
	xlp_setup_core();

	xlp_parse_mmu_options();
	xlp_mem_init();

	/* install the reset exception vector handler */
	bcopy(XLPResetEntry, (void *)MIPS_RESET_EXC_VEC,
	    XLPResetEntryEnd - XLPResetEntry);
#ifdef SMP
	/*
	 * We will enable the other threads in core 0 here
	 * so that the TLB and cache info is correct when
	 * mips_init runs
	 */
	xlp_enable_threads(xlp_mmuval);
#endif
	/* setup for the startup core */
	xlp_setup_mmu();

	xlp_enable_blocks();

	/* Read/Guess/setup board information */
	nlm_board_info_setup();

	/* MIPS generic init */
	mips_init();

	/*
	 * XLP specific post initialization
	 * initialize other on chip stuff
	 */
	xlp_pic_init();

	mips_timer_init_params(xlp_cpu_frequency, 0);
}
561
/*
 * Per-CPU platform initialization hook; nothing XLP-specific is
 * needed here.  (Declared with a proper (void) prototype; the old
 * empty parameter list is an obsolescent C feature.)
 */
void
platform_cpu_init(void)
{
}
566
/*
 * Reset the chip through the SYS block and spin in 'wait' until the
 * reset takes effect.  Does not return.
 */
void
platform_reset(void)
{
	uint64_t sysbase = nlm_get_sys_regbase(0);

	nlm_write_sys_reg(sysbase, SYS_CHIP_RESET, 1);
	for( ; ; )
		__asm __volatile("wait");
}
576
577 #ifdef SMP
/*
 * XLP threads are started simultaneously when we enable threads, this will
 * ensure that the threads are blocked in platform_init_ap, until they are
 * ready to proceed to smp_init_secondary()
 */
static volatile int thr_unblock[4];	/* one release flag per in-core thread id */
584
/*
 * Start the AP with FreeBSD cpuid 'cpuid'.  For thread 0 of a core
 * this wakes the whole core (its threads start together: enable the
 * core clock, deassert reset, then wait for the core to leave
 * non-coherent mode).  Threads of an already-running core are just
 * released from their spin in platform_init_ap().  Always returns 0.
 */
int
platform_start_ap(int cpuid)
{
	uint32_t coremask, val;
	uint64_t sysbase = nlm_get_sys_regbase(0);
	int hwtid = xlp_cpuid_to_hwtid[cpuid];
	int core, thr;

	core = hwtid / 4;	/* 4 hardware threads per core */
	thr = hwtid % 4;
	if (thr == 0) {
		/* First thread in core, do core wake up */
		coremask = 1u << core;

		/* Enable core clock */
		val = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL);
		val &= ~coremask;
		nlm_write_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL, val);

		/* Remove CPU Reset */
		val = nlm_read_sys_reg(sysbase, SYS_CPU_RESET);
		val &= ~coremask & 0xff;
		nlm_write_sys_reg(sysbase, SYS_CPU_RESET, val);

		if (bootverbose)
			printf("Waking up core %d ...", core);

		/* Poll for CPU to mark itself coherent */
		do {
			val = nlm_read_sys_reg(sysbase, SYS_CPU_NONCOHERENT_MODE);
		} while ((val & coremask) != 0);
		if (bootverbose)
			printf("Done\n");
	} else {
		/* otherwise release the threads stuck in platform_init_ap */
		thr_unblock[thr] = 1;
	}

	return (0);
}
625
/*
 * Early AP initialization, executed on the AP itself.  Thread 0 of a
 * freshly woken core performs the core setup and enables its sibling
 * threads; siblings spin here until platform_start_ap() releases them
 * via thr_unblock[].  All threads then set up the MMU, enable
 * COP0/COP2 access and unmask the IPI, timer and message-ring IRQs.
 */
void
platform_init_ap(int cpuid)
{
	uint32_t stat;
	int thr;

	/* The first thread has to setup the MMU and enable other threads */
	thr = nlm_threadid();
	if (thr == 0) {
		xlp_setup_core();
		xlp_enable_threads(xlp_mmuval);
	} else {
		/*
		 * FIXME busy wait here eats too many cycles, especially
		 * in the core 0 while bootup
		 */
		while (thr_unblock[thr] == 0)
			__asm__ __volatile__ ("nop;nop;nop;nop");
		thr_unblock[thr] = 0;
	}

	xlp_setup_mmu();
	stat = mips_rd_status();
	KASSERT((stat & MIPS_SR_INT_IE) == 0,
	    ("Interrupts enabled in %s!", __func__));
	stat |= MIPS_SR_COP_2_BIT | MIPS_SR_COP_0_BIT;
	mips_wr_status(stat);

	/* mask everything, then enable only what this CPU needs */
	nlm_write_c0_eimr(0ull);
	xlp_enable_irq(IRQ_IPI);
	xlp_enable_irq(IRQ_TIMER);
	xlp_enable_irq(IRQ_MSGRING);

	return;
}
661
/*
 * Hardware interrupt number used for inter-processor interrupts.
 */
int
platform_ipi_hardintr_num(void)
{

	return (IRQ_IPI);
}
668
/*
 * Soft-interrupt IPIs are not used on this platform (-1 = none).
 */
int
platform_ipi_softintr_num(void)
{

	return (-1);
}
675
/*
 * Send an IPI to the hardware thread backing 'cpuid' via the PIC.
 */
void
platform_ipi_send(int cpuid)
{

	nlm_pic_send_ipi(xlp_pic_base, xlp_cpuid_to_hwtid[cpuid],
	    platform_ipi_hardintr_num(), 0);
}
683
/*
 * No platform-specific IPI acknowledge action is needed (empty hook).
 */
void
platform_ipi_clear(void)
{
}
688
689 int
690 platform_processor_id(void)
691 {
692
693 return (xlp_hwtid_to_cpuid[nlm_cpuid()]);
694 }
695
696 void
697 platform_cpu_mask(cpuset_t *mask)
698 {
699 int i, s;
700
701 CPU_ZERO(mask);
702 s = xlp_ncores * xlp_threads_per_core;
703 for (i = 0; i < s; i++)
704 CPU_SET(i, mask);
705 }
706
707 struct cpu_group *
708 platform_smp_topo()
709 {
710
711 return (smp_topo_2level(CG_SHARE_L2, xlp_ncores, CG_SHARE_L1,
712 xlp_threads_per_core, CG_FLAG_THREAD));
713 }
714 #endif