1 /*-
2 * Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights
3 * reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in
13 * the documentation and/or other materials provided with the
14 * distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * NETLOGIC_BSD */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD: releng/9.0/sys/mips/nlm/xlp_machdep.c 225394 2011-09-05 10:45:29Z jchandra $");
32
33 #include "opt_ddb.h"
34
35 #include <sys/param.h>
36 #include <sys/bus.h>
37 #include <sys/conf.h>
38 #include <sys/rtprio.h>
39 #include <sys/systm.h>
40 #include <sys/interrupt.h>
41 #include <sys/limits.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mutex.h>
45 #include <sys/random.h>
46
47 #include <sys/cons.h> /* cinit() */
48 #include <sys/kdb.h>
49 #include <sys/reboot.h>
50 #include <sys/queue.h>
51 #include <sys/smp.h>
52 #include <sys/timetc.h>
53
54 #include <vm/vm.h>
55 #include <vm/vm_page.h>
56
57 #include <machine/cpu.h>
58 #include <machine/cpufunc.h>
59 #include <machine/cpuinfo.h>
60 #include <machine/tlb.h>
61 #include <machine/cpuregs.h>
62 #include <machine/frame.h>
63 #include <machine/hwfunc.h>
64 #include <machine/md_var.h>
65 #include <machine/asm.h>
66 #include <machine/pmap.h>
67 #include <machine/trap.h>
68 #include <machine/clock.h>
69 #include <machine/fls64.h>
70 #include <machine/intr_machdep.h>
71 #include <machine/smp.h>
72
73 #include <mips/nlm/hal/mips-extns.h>
74 #include <mips/nlm/hal/haldefs.h>
75 #include <mips/nlm/hal/iomap.h>
76 #include <mips/nlm/hal/sys.h>
77 #include <mips/nlm/hal/pic.h>
78 #include <mips/nlm/hal/uart.h>
79 #include <mips/nlm/hal/mmu.h>
80 #include <mips/nlm/hal/bridge.h>
81 #include <mips/nlm/hal/cpucontrol.h>
82
83 #include <mips/nlm/clock.h>
84 #include <mips/nlm/interrupt.h>
85 #include <mips/nlm/board.h>
86 #include <mips/nlm/xlp.h>
87
88 /* 4KB static data aread to keep a copy of the bootload env until
89 the dynamic kenv is setup */
90 char boot1_env[4096];
91 int xlp_argc;
92 char **xlp_argv, **xlp_envp;
93
94 uint64_t xlp_cpu_frequency;
95 uint64_t xlp_io_base = MIPS_PHYS_TO_KSEG1(XLP_DEFAULT_IO_BASE);
96
97 int xlp_ncores;
98 int xlp_threads_per_core;
99 uint32_t xlp_hw_thread_mask;
100 int xlp_cpuid_to_hwtid[MAXCPU];
101 int xlp_hwtid_to_cpuid[MAXCPU];
102 uint64_t xlp_pic_base;
103
104 static int xlp_mmuval;
105
106 extern uint32_t _end;
107 extern char XLPResetEntry[], XLPResetEntryEnd[];
108
/*
 * Early per-core setup: tweak the LSU and scheduler "defeature" control
 * registers.  Enables unaligned-access support and L2HPE, and applies
 * A0-revision errata workarounds.  Called on the boot core from
 * platform_start() and on thread 0 of each secondary core from
 * platform_init_ap().
 */
static void
xlp_setup_core(void)
{
	uint64_t reg;

	reg = nlm_mfcr(LSU_DEFEATURE);
	/* Enable Unaligned and L2HPE */
	reg |= (1 << 30) | (1 << 23);
	/*
	 * Experimental : Enable SUE
	 * Speculative Unmap Enable. Enable speculative L2 cache request for
	 * unmapped access.
	 */
	reg |= (1ull << 31);
	/* Clear S1RCM - A0 errata */
	reg &= ~0xeull;
	nlm_mtcr(LSU_DEFEATURE, reg);

	reg = nlm_mfcr(SCHED_DEFEATURE);
	/* Experimental: Disable BRU accepting ALU ops - A0 errata */
	reg |= (1 << 24);
	nlm_mtcr(SCHED_DEFEATURE, reg);
}
132
/*
 * Program the per-core MMU configuration: 4K base pages, extended TLB
 * enabled, large variable TLB disabled.  Run on the startup core and by
 * the first thread of each secondary core after its siblings are enabled.
 */
static void
xlp_setup_mmu(void)
{

	nlm_setup_extended_pagemask(0);	/* pagemask = 0 for 4K pages */
	nlm_large_variable_tlb_en(0);
	nlm_extended_tlb_en(1);
	nlm_mmu_setup(0, 0, 0);
}
142
143 static void
144 xlp_parse_mmu_options(void)
145 {
146 int i, j, k;
147 uint32_t cpu_map = xlp_hw_thread_mask;
148 uint32_t core0_thr_mask, core_thr_mask;
149
150 #ifndef SMP /* Uniprocessor! */
151 if (cpu_map != 0x1) {
152 printf("WARNING: Starting uniprocessor kernel on cpumask [0x%lx]!\n"
153 "WARNING: Other CPUs will be unused.\n", (u_long)cpu_map);
154 cpu_map = 0x1;
155 }
156 #endif
157
158 xlp_ncores = 1;
159 core0_thr_mask = cpu_map & 0xf;
160 switch (core0_thr_mask) {
161 case 1:
162 xlp_threads_per_core = 1;
163 xlp_mmuval = 0;
164 break;
165 case 3:
166 xlp_threads_per_core = 2;
167 xlp_mmuval = 2;
168 break;
169 case 0xf:
170 xlp_threads_per_core = 4;
171 xlp_mmuval = 3;
172 break;
173 default:
174 goto unsupp;
175 }
176
177 /* Verify other cores CPU masks */
178 for (i = 1; i < XLP_MAX_CORES; i++) {
179 core_thr_mask = (cpu_map >> (i*4)) & 0xf;
180 if (core_thr_mask) {
181 if (core_thr_mask != core0_thr_mask)
182 goto unsupp;
183 xlp_ncores++;
184 }
185 }
186
187 xlp_hw_thread_mask = cpu_map;
188 /* setup hardware processor id to cpu id mapping */
189 for (i = 0; i< MAXCPU; i++)
190 xlp_cpuid_to_hwtid[i] =
191 xlp_hwtid_to_cpuid [i] = -1;
192 for (i = 0, k = 0; i < XLP_MAX_CORES; i++) {
193 if (((cpu_map >> (i*4)) & 0xf) == 0)
194 continue;
195 for (j = 0; j < xlp_threads_per_core; j++) {
196 xlp_cpuid_to_hwtid[k] = i*4 + j;
197 xlp_hwtid_to_cpuid[i*4 + j] = k;
198 k++;
199 }
200 }
201
202 #ifdef SMP
203 /*
204 * We will enable the other threads in core 0 here
205 * so that the TLB and cache info is correct when
206 * mips_init runs
207 */
208 xlp_enable_threads(xlp_mmuval);
209 #endif
210 /* setup for the startup core */
211 xlp_setup_mmu();
212 return;
213
214 unsupp:
215 printf("ERROR : Unsupported CPU mask [use 1,2 or 4 threads per core].\n"
216 "\tcore0 thread mask [%lx], boot cpu mask [%lx].\n",
217 (u_long)core0_thr_mask, (u_long)cpu_map);
218 panic("Invalid CPU mask - halting.\n");
219 return;
220 }
221
222 static void
223 xlp_set_boot_flags(void)
224 {
225 char *p;
226
227 p = getenv("bootflags");
228 if (p == NULL)
229 return;
230
231 for (; p && *p != '\0'; p++) {
232 switch (*p) {
233 case 'd':
234 case 'D':
235 boothowto |= RB_KDB;
236 break;
237 case 'g':
238 case 'G':
239 boothowto |= RB_GDB;
240 break;
241 case 'v':
242 case 'V':
243 boothowto |= RB_VERBOSE;
244 break;
245
246 case 's': /* single-user (default, supported for sanity) */
247 case 'S':
248 boothowto |= RB_SINGLE;
249 break;
250
251 default:
252 printf("Unrecognized boot flag '%c'.\n", *p);
253 break;
254 }
255 }
256
257 freeenv(p);
258 return;
259 }
260
/*
 * Generic MIPS kernel bring-up: tunables (init_param1/2), CPU and cache
 * identification, pmap bootstrap, optional DDB entry, proc0 and the
 * mutex subsystem.
 */
static void
mips_init(void)
{
	init_param1();
	init_param2(physmem);

	mips_cpu_init();
	/* XLP DMA is cache-coherent, so no manual cache sync for busdma */
	cpuinfo.cache_coherent_dma = TRUE;
	pmap_bootstrap();
#ifdef DDB
	kdb_init();
	/* Drop into the debugger immediately if 'd' was in bootflags */
	if (boothowto & RB_KDB) {
		kdb_enter("Boot flags requested debugger", NULL);
	}
#endif
	mips_proc0_init();
	mutex_init();
}
279
280 unsigned int
281 platform_get_timecount(struct timecounter *tc __unused)
282 {
283 uint64_t count = nlm_pic_read_timer(xlp_pic_base, PIC_CLOCK_TIMER);
284
285 return (unsigned int)~count;
286 }
287
288 static void
289 xlp_pic_init(void)
290 {
291 struct timecounter pic_timecounter = {
292 platform_get_timecount, /* get_timecount */
293 0, /* no poll_pps */
294 ~0U, /* counter_mask */
295 XLP_IO_CLK, /* frequency */
296 "XLRPIC", /* name */
297 2000, /* quality (adjusted in code) */
298 };
299 int i;
300
301 xlp_pic_base = nlm_get_pic_regbase(0); /* TOOD: Add other nodes */
302 printf("Initializing PIC...@%jx\n", (uintmax_t)xlp_pic_base);
303 /* Bind all PIC irqs to cpu 0 */
304 for(i = 0; i < PIC_NUM_IRTS; i++) {
305 nlm_pic_write_irt(xlp_pic_base, i, 0, 0, 1, 0,
306 1, 0, 0x1);
307 }
308
309 nlm_pic_set_timer(xlp_pic_base, PIC_CLOCK_TIMER, ~0ULL, 0, 0);
310 platform_timecounter = &pic_timecounter;
311 }
312
313 #if defined(__mips_n32) || defined(__mips_n64) /* PHYSADDR_64_BIT */
314 #ifdef XLP_SIM
315 #define XLP_MEM_LIM 0x200000000ULL
316 #else
317 #define XLP_MEM_LIM 0x10000000000ULL
318 #endif
319 #else
320 #define XLP_MEM_LIM 0xfffff000UL
321 #endif
322 static void
323 xlp_mem_init(void)
324 {
325 uint64_t bridgebase = nlm_get_bridge_regbase(0); /* TOOD: Add other nodes */
326 vm_size_t physsz = 0;
327 uint64_t base, lim, val;
328 int i, j;
329
330 for (i = 0, j = 0; i < 8; i++) {
331 val = nlm_read_bridge_reg(bridgebase, BRIDGE_DRAM_BAR(i));
332 base = ((val >> 12) & 0xfffff) << 20;
333 val = nlm_read_bridge_reg(bridgebase, BRIDGE_DRAM_LIMIT(i));
334 lim = ((val >> 12) & 0xfffff) << 20;
335
336 /* BAR not enabled */
337 if (lim == 0)
338 continue;
339
340 /* first bar, start a bit after end */
341 if (base == 0) {
342 base = (vm_paddr_t)MIPS_KSEG0_TO_PHYS(&_end) + 0x20000;
343 lim = 0x0c000000; /* TODO : hack to avoid uboot packet mem */
344 }
345 if (base >= XLP_MEM_LIM) {
346 printf("Mem [%d]: Ignore %#jx - %#jx\n", i,
347 (intmax_t)base, (intmax_t)lim);
348 continue;
349 }
350 if (lim > XLP_MEM_LIM) {
351 printf("Mem [%d]: Restrict %#jx -> %#jx\n", i,
352 (intmax_t)lim, (intmax_t)XLP_MEM_LIM);
353 lim = XLP_MEM_LIM;
354 }
355 if (lim <= base) {
356 printf("Mem[%d]: Malformed %#jx -> %#jx\n", i,
357 (intmax_t)base, (intmax_t)lim);
358 continue;
359 }
360
361 /*
362 * Exclude reset entry memory range 0x1fc00000 - 0x20000000
363 * from free memory
364 */
365 if (base <= 0x1fc00000 && (base + lim) > 0x1fc00000) {
366 uint64_t base0, lim0, base1, lim1;
367
368 base0 = base;
369 lim0 = 0x1fc00000;
370 base1 = 0x20000000;
371 lim1 = lim;
372
373 if (lim0 > base0) {
374 phys_avail[j++] = (vm_paddr_t)base0;
375 phys_avail[j++] = (vm_paddr_t)lim0;
376 physsz += lim0 - base0;
377 printf("Mem[%d]: %#jx - %#jx (excl reset)\n", i,
378 (intmax_t)base0, (intmax_t)lim0);
379 }
380 if (lim1 > base1) {
381 phys_avail[j++] = (vm_paddr_t)base1;
382 phys_avail[j++] = (vm_paddr_t)lim1;
383 physsz += lim1 - base1;
384 printf("Mem[%d]: %#jx - %#jx (excl reset)\n", i,
385 (intmax_t)base1, (intmax_t)lim1);
386 }
387 } else {
388 phys_avail[j++] = (vm_paddr_t)base;
389 phys_avail[j++] = (vm_paddr_t)lim;
390 physsz += lim - base;
391 printf("Mem[%d]: %#jx - %#jx\n", i,
392 (intmax_t)base, (intmax_t)lim);
393 }
394
395 }
396
397 /* setup final entry with 0 */
398 phys_avail[j] = phys_avail[j + 1] = 0;
399
400 /* copy phys_avail to dump_avail */
401 for(i = 0; i <= j + 1; i++)
402 dump_avail[i] = phys_avail[i];
403
404 realmem = physmem = btoc(physsz);
405 }
406
407 static uint32_t
408 xlp_get_cpu_frequency(void)
409 {
410 uint64_t sysbase = nlm_get_sys_regbase(0);
411 unsigned int pll_divf, pll_divr, dfs_div, num, denom;
412 uint32_t val;
413
414 val = nlm_read_sys_reg(sysbase, SYS_POWER_ON_RESET_CFG);
415 pll_divf = (val >> 10) & 0x7f;
416 pll_divr = (val >> 8) & 0x3;
417 dfs_div = (val >> 17) & 0x3;
418
419 num = pll_divf + 1;
420 denom = 3 * (pll_divr + 1) * (1<< (dfs_div + 1));
421 val = 800000000ULL * num / denom;
422 return (val);
423 }
424
/*
 * Kernel entry point, called from the boot loader.  a0 carries the
 * hardware thread mask of the booting CPUs.  argv/envp are not wired up
 * yet: xlp_argc is hardcoded to 1, so the environment-parsing loop
 * below currently never executes.  Brings up pcpu, console, clocks,
 * memory map, MMU and PIC, then runs the generic MIPS initialization.
 */
void
platform_start(__register_t a0 __unused,
    __register_t a1 __unused,
    __register_t a2 __unused,
    __register_t a3 __unused)
{
	int i;

	xlp_argc = 1;
	/*
	 * argv and envp are passed in array of 32bit pointers
	 */
	xlp_argv = NULL;
	xlp_envp = NULL;

	/* Initialize pcpu stuff */
	mips_pcpu0_init();

	/* initialize console so that we have printf */
	boothowto |= (RB_SERIAL | RB_MULTIPLE); /* Use multiple consoles */

	/* For now */
	boothowto |= RB_VERBOSE;
	boothowto |= RB_SINGLE;
	bootverbose++;

	/* clockrate used by delay, so initialize it here */
	xlp_cpu_frequency = xlp_get_cpu_frequency();
	cpu_clock = xlp_cpu_frequency / 1000000;
	mips_timer_early_init(xlp_cpu_frequency);

	/* Init console please */
	cninit();

	/* Environment */
	printf("Args %#jx %#jx %#jx %#jx:\n", (intmax_t)a0,
	    (intmax_t)a1, (intmax_t)a2, (intmax_t)a3);
	xlp_hw_thread_mask = a0;
	init_static_kenv(boot1_env, sizeof(boot1_env));
	printf("Environment (from %d args):\n", xlp_argc - 1);
	if (xlp_argc == 1)
		printf("\tNone\n");
	for (i = 1; i < xlp_argc; i++) {
		char *n, *arg;

		arg = (char *)(intptr_t)xlp_argv[i];
		printf("\t%s\n", arg);
		/* "name=value" -> setenv(name, value); bare "name" -> "1" */
		n = strsep(&arg, "=");
		if (arg == NULL)
			setenv(n, "1");
		else
			setenv(n, arg);
	}

	/* Early core init and fixes for errata */
	xlp_setup_core();

	xlp_set_boot_flags();
	xlp_parse_mmu_options();

	xlp_mem_init();

	/* Install the secondary-CPU startup trampoline at the reset vector */
	bcopy(XLPResetEntry, (void *)MIPS_RESET_EXC_VEC,
	    XLPResetEntryEnd - XLPResetEntry);

	/*
	 * MIPS generic init
	 */
	mips_init();
	/*
	 * XLP specific post initialization
	 * initialize other on chip stuff
	 */
	nlm_board_info_setup();
	xlp_pic_init();

	mips_timer_init_params(xlp_cpu_frequency, 0);
}
503
/* Per-CPU platform initialization hook; nothing to do on XLP. */
void
platform_cpu_init()
{
}
508
/* Print a one-line board identification at boot. */
void
platform_identify(void)
{

	printf("XLP Eval Board\n");
}
515
516 /*
517 * XXX Maybe return the state of the watchdog in enter, and pass it to
518 * exit? Like spl().
519 */
/* Trap-entry hook; currently a no-op on XLP (see XXX note above). */
void
platform_trap_enter(void)
{
}
524
525 void
526 platform_reset(void)
527 {
528 uint64_t sysbase = nlm_get_sys_regbase(0);
529
530 nlm_write_sys_reg(sysbase, SYS_CHIP_RESET, 1);
531 for(;;)
532 __asm __volatile("wait");
533 }
534
/* Trap-exit hook; currently a no-op on XLP. */
void
platform_trap_exit(void)
{
}
539
540 #ifdef SMP
541 /*
542 * XLP threads are started simultaneously when we enable threads, this will
543 * ensure that the threads are blocked in platform_init_ap, until they are
544 * ready to proceed to smp_init_secondary()
545 */
546 static volatile int thr_unblock[4];
547
/*
 * Start application processor 'cpuid'.  For thread 0 of a core, power
 * up the whole core: enable its clock, release it from reset, then wait
 * for it to mark itself cache-coherent.  For the remaining threads of
 * an already-running core, just set the unblock flag they are spinning
 * on in platform_init_ap().  Always returns 0.
 */
int
platform_start_ap(int cpuid)
{
	uint32_t coremask, val;
	uint64_t sysbase = nlm_get_sys_regbase(0);
	int hwtid = xlp_cpuid_to_hwtid[cpuid];
	int core, thr;

	core = hwtid / 4;	/* 4 hardware threads per core */
	thr = hwtid % 4;
	if (thr == 0) {
		/* First thread in core, do core wake up */
		coremask = 1u << core;

		/* Enable core clock */
		val = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL);
		val &= ~coremask;
		nlm_write_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL, val);

		/* Remove CPU Reset */
		val = nlm_read_sys_reg(sysbase, SYS_CPU_RESET);
		/* NOTE(review): 0xff presumably limits to the 8 core reset bits - confirm vs. chip docs */
		val &= ~coremask & 0xff;
		nlm_write_sys_reg(sysbase, SYS_CPU_RESET, val);

		if (bootverbose)
			printf("Waking up core %d ...", core);

		/* Poll for CPU to mark itself coherent */
		do {
			val = nlm_read_sys_reg(sysbase, SYS_CPU_NONCOHERENT_MODE);
		} while ((val & coremask) != 0);
		if (bootverbose)
			printf("Done\n");
	} else {
		/* otherwise release the threads stuck in platform_init_ap */
		thr_unblock[thr] = 1;
	}

	return (0);
}
588
/*
 * Per-AP initialization, executed on the application processor itself.
 * Thread 0 of a core applies the errata fixes, enables the sibling
 * threads and programs the MMU; each sibling thread spins until
 * platform_start_ap() sets its thr_unblock[] flag.  Finally the CP0/CP2
 * access bits and the XLP interrupt masks (IPI, timer, message ring)
 * are configured.
 */
void
platform_init_ap(int cpuid)
{
	uint32_t stat;
	int thr;

	/* The first thread has to setup the MMU and enable other threads */
	thr = nlm_threadid();
	if (thr == 0) {
		xlp_setup_core();
		xlp_enable_threads(xlp_mmuval);
		xlp_setup_mmu();
	} else {
		/*
		 * FIXME busy wait here eats too many cycles, especially
		 * in the core 0 while bootup
		 */
		while (thr_unblock[thr] == 0)
			__asm__ __volatile__ ("nop;nop;nop;nop");
		thr_unblock[thr] = 0;
	}

	stat = mips_rd_status();
	/* APs must come up with interrupts still disabled */
	KASSERT((stat & MIPS_SR_INT_IE) == 0,
	    ("Interrupts enabled in %s!", __func__));
	/* Grant access to coprocessor 0 and coprocessor 2 */
	stat |= MIPS_SR_COP_2_BIT | MIPS_SR_COP_0_BIT;
	mips_wr_status(stat);

	/* Mask everything, then enable only the interrupts handled here */
	nlm_write_c0_eimr(0ull);
	xlp_enable_irq(IRQ_IPI);
	xlp_enable_irq(IRQ_TIMER);
	xlp_enable_irq(IRQ_MSGRING);

	return;
}
624
625 int
626 platform_ipi_intrnum(void)
627 {
628
629 return (IRQ_IPI);
630 }
631
632 void
633 platform_ipi_send(int cpuid)
634 {
635
636 nlm_pic_send_ipi(xlp_pic_base, xlp_cpuid_to_hwtid[cpuid],
637 platform_ipi_intrnum(), 0);
638 }
639
/* IPI acknowledge hook; nothing to do here on XLP. */
void
platform_ipi_clear(void)
{
}
644
645 int
646 platform_processor_id(void)
647 {
648
649 return (xlp_hwtid_to_cpuid[nlm_cpuid()]);
650 }
651
652 void
653 platform_cpu_mask(cpuset_t *mask)
654 {
655 int i, s;
656
657 CPU_ZERO(mask);
658 s = xlp_ncores * xlp_threads_per_core;
659 for (i = 0; i < s; i++)
660 CPU_SET(i, mask);
661 }
662
/*
 * Describe the CPU topology to the scheduler: a two-level hierarchy
 * where cores share L2 and the hardware threads within a core share L1.
 */
struct cpu_group *
platform_smp_topo()
{

	return (smp_topo_2level(CG_SHARE_L2, xlp_ncores, CG_SHARE_L1,
	    xlp_threads_per_core, CG_FLAG_THREAD));
}
670 #endif
Cache object: f0625134830c54cbdf63c0ccd07399dc
|