1 /*-
2 * Copyright (c) 2006-2009 RMI Corporation
3 * Copyright (c) 2002-2004 Juli Mallett <jmallett@FreeBSD.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 */
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD: releng/10.4/sys/mips/rmi/xlr_machdep.c 232853 2012-03-12 07:34:15Z jmallett $");
30
31 #include "opt_ddb.h"
32
33 #include <sys/param.h>
34 #include <sys/bus.h>
35 #include <sys/conf.h>
36 #include <sys/rtprio.h>
37 #include <sys/systm.h>
38 #include <sys/interrupt.h>
39 #include <sys/limits.h>
40 #include <sys/lock.h>
41 #include <sys/malloc.h>
42 #include <sys/mutex.h>
43 #include <sys/random.h>
44
45 #include <sys/cons.h> /* cinit() */
46 #include <sys/kdb.h>
47 #include <sys/reboot.h>
48 #include <sys/queue.h>
49 #include <sys/smp.h>
50 #include <sys/timetc.h>
51
52 #include <vm/vm.h>
53 #include <vm/vm_page.h>
54
55 #include <machine/cpu.h>
56 #include <machine/cpufunc.h>
57 #include <machine/cpuinfo.h>
58 #include <machine/cpuregs.h>
59 #include <machine/frame.h>
60 #include <machine/hwfunc.h>
61 #include <machine/md_var.h>
62 #include <machine/asm.h>
63 #include <machine/pmap.h>
64 #include <machine/trap.h>
65 #include <machine/clock.h>
66 #include <machine/fls64.h>
67 #include <machine/intr_machdep.h>
68 #include <machine/smp.h>
69
70 #include <mips/rmi/iomap.h>
71 #include <mips/rmi/msgring.h>
72 #include <mips/rmi/interrupt.h>
73 #include <mips/rmi/pic.h>
74 #include <mips/rmi/board.h>
75 #include <mips/rmi/rmi_mips_exts.h>
76 #include <mips/rmi/rmi_boot_info.h>
77
/* Park loop for secondary CPUs; APs spin here until released (see platform_start_ap). */
void mpwait(void);
/* Base address used by xlr_io_mmio() to reach on-chip I/O registers. */
unsigned long xlr_io_base = (unsigned long)(DEFAULT_XLR_IO_BASE);

/* 4KB static data area to keep a copy of the bootloader env until
   the dynamic kenv is setup */
char boot1_env[4096];
int rmi_spin_mutex_safe=0;	/* non-zero once spin mutexes are usable (set elsewhere — NOTE(review)) */
struct mtx xlr_pic_lock;	/* spin lock for PIC register access (initialized in xlr_pic_init) */

/*
 * Parameters from boot loader
 * (read out of the COP0 OS-scratch registers in platform_start)
 */
struct boot1_info xlr_boot1_info;	/* boot1 info block, scratch reg 0 */
int xlr_run_mode;			/* run mode word, scratch reg 3 */
int xlr_argc;				/* bootloader argument count, scratch reg 4 */
int32_t *xlr_argv, *xlr_envp;		/* 32-bit pointer arrays, scratch regs 5/6 */
uint64_t cpu_mask_info;			/* raw CPU mask, scratch reg 1 */
uint32_t xlr_online_cpumask;		/* online CPU mask, scratch reg 2 */
uint32_t xlr_core_cpu_mask = 0x1;	/* Core 0 thread 0 is always there */

/* CPU topology state derived in xlr_parse_mmu_options(). */
int xlr_shtlb_enabled;			/* non-zero if shared-TLB mode requested */
int xlr_ncores;				/* number of cores with any thread online */
int xlr_threads_per_core;		/* 1, 2 or 4; identical on every core */
uint32_t xlr_hw_thread_mask;		/* validated hardware thread mask */
int xlr_cpuid_to_hwtid[MAXCPU];		/* FreeBSD cpuid -> hw thread id (-1 if unused) */
int xlr_hwtid_to_cpuid[MAXCPU];		/* hw thread id -> FreeBSD cpuid (-1 if unused) */
104
105 static void
106 xlr_setup_mmu_split(void)
107 {
108 uint64_t mmu_setup;
109 int val = 0;
110
111 if (xlr_threads_per_core == 4 && xlr_shtlb_enabled == 0)
112 return; /* no change from boot setup */
113
114 switch (xlr_threads_per_core) {
115 case 1:
116 val = 0; break;
117 case 2:
118 val = 2; break;
119 case 4:
120 val = 3; break;
121 }
122
123 mmu_setup = read_xlr_ctrl_register(4, 0);
124 mmu_setup = mmu_setup & ~0x06;
125 mmu_setup |= (val << 1);
126
127 /* turn on global mode */
128 if (xlr_shtlb_enabled)
129 mmu_setup |= 0x01;
130
131 write_xlr_ctrl_register(4, 0, mmu_setup);
132 }
133
134 static void
135 xlr_parse_mmu_options(void)
136 {
137 #ifdef notyet
138 char *hw_env, *start, *end;
139 #endif
140 uint32_t cpu_map;
141 uint8_t core0_thr_mask, core_thr_mask;
142 int i, j, k;
143
144 /* First check for the shared TLB setup */
145 xlr_shtlb_enabled = 0;
146 #ifdef notyet
147 /*
148 * We don't support sharing TLB per core - TODO
149 */
150 xlr_shtlb_enabled = 0;
151 if ((hw_env = getenv("xlr.shtlb")) != NULL) {
152 start = hw_env;
153 tmp = strtoul(start, &end, 0);
154 if (start != end)
155 xlr_shtlb_enabled = (tmp != 0);
156 else
157 printf("Bad value for xlr.shtlb [%s]\n", hw_env);
158 freeenv(hw_env);
159 }
160 #endif
161 /*
162 * XLR supports splitting the 64 TLB entries across one, two or four
163 * threads (split mode). XLR also allows the 64 TLB entries to be shared
164 * across all threads in the core using a global flag (shared TLB mode).
165 * We will support 1/2/4 threads in split mode or shared mode.
166 *
167 */
168 xlr_ncores = 1;
169 cpu_map = xlr_boot1_info.cpu_online_map;
170
171 #ifndef SMP /* Uniprocessor! */
172 if (cpu_map != 0x1) {
173 printf("WARNING: Starting uniprocessor kernel on cpumask [0x%lx]!\n"
174 "WARNING: Other CPUs will be unused.\n", (u_long)cpu_map);
175 cpu_map = 0x1;
176 }
177 #endif
178 core0_thr_mask = cpu_map & 0xf;
179 switch (core0_thr_mask) {
180 case 1:
181 xlr_threads_per_core = 1; break;
182 case 3:
183 xlr_threads_per_core = 2; break;
184 case 0xf:
185 xlr_threads_per_core = 4; break;
186 default:
187 goto unsupp;
188 }
189
190 /* Verify other cores CPU masks */
191 for (i = 1; i < XLR_MAX_CORES; i++) {
192 core_thr_mask = (cpu_map >> (i*4)) & 0xf;
193 if (core_thr_mask) {
194 if (core_thr_mask != core0_thr_mask)
195 goto unsupp;
196 xlr_ncores++;
197 }
198 }
199 xlr_hw_thread_mask = cpu_map;
200
201 /* setup hardware processor id to cpu id mapping */
202 for (i = 0; i< MAXCPU; i++)
203 xlr_cpuid_to_hwtid[i] =
204 xlr_hwtid_to_cpuid [i] = -1;
205 for (i = 0, k = 0; i < XLR_MAX_CORES; i++) {
206 if (((cpu_map >> (i*4)) & 0xf) == 0)
207 continue;
208 for (j = 0; j < xlr_threads_per_core; j++) {
209 xlr_cpuid_to_hwtid[k] = i*4 + j;
210 xlr_hwtid_to_cpuid[i*4 + j] = k;
211 k++;
212 }
213 }
214
215 /* setup for the startup core */
216 xlr_setup_mmu_split();
217 return;
218
219 unsupp:
220 printf("ERROR : Unsupported CPU mask [use 1,2 or 4 threads per core].\n"
221 "\tcore0 thread mask [%lx], boot cpu mask [%lx]\n"
222 "\tUsing default, 16 TLB entries per CPU, split mode\n",
223 (u_long)core0_thr_mask, (u_long)cpu_map);
224 panic("Invalid CPU mask - halting.\n");
225 return;
226 }
227
228 static void
229 xlr_set_boot_flags(void)
230 {
231 char *p;
232
233 p = getenv("bootflags");
234 if (p == NULL)
235 p = getenv("boot_flags"); /* old style */
236 if (p == NULL)
237 return;
238
239 for (; p && *p != '\0'; p++) {
240 switch (*p) {
241 case 'd':
242 case 'D':
243 boothowto |= RB_KDB;
244 break;
245 case 'g':
246 case 'G':
247 boothowto |= RB_GDB;
248 break;
249 case 'v':
250 case 'V':
251 boothowto |= RB_VERBOSE;
252 break;
253
254 case 's': /* single-user (default, supported for sanity) */
255 case 'S':
256 boothowto |= RB_SINGLE;
257 break;
258
259 default:
260 printf("Unrecognized boot flag '%c'.\n", *p);
261 break;
262 }
263 }
264
265 freeenv(p);
266 return;
267 }
/* End of the loaded kernel image (linker-provided symbol); used below to start phys_avail[0]. */
extern uint32_t _end;
269
/*
 * Common MIPS kernel bring-up: tunables, CPU identification, pmap
 * bootstrap, optional debugger entry, proc0 and mutex initialization.
 * Called from platform_start() after xlr_mem_init() has set physmem;
 * the call order below is significant.
 */
static void
mips_init(void)
{
	init_param1();
	init_param2(physmem);

	mips_cpu_init();
	/* Tell the VM layer that DMA on this platform is cache-coherent. */
	cpuinfo.cache_coherent_dma = TRUE;
	pmap_bootstrap();
#ifdef DDB
	kdb_init();
	/* Enter the debugger immediately if the boot flags asked for it ('d'). */
	if (boothowto & RB_KDB) {
		kdb_enter("Boot flags requested debugger", NULL);
	}
#endif
	mips_proc0_init();
	mutex_init();
}
288
289 u_int
290 platform_get_timecount(struct timecounter *tc __unused)
291 {
292
293 return (0xffffffffU - pic_timer_count32(PIC_CLOCK_TIMER));
294 }
295
296 static void
297 xlr_pic_init(void)
298 {
299 struct timecounter pic_timecounter = {
300 platform_get_timecount, /* get_timecount */
301 0, /* no poll_pps */
302 ~0U, /* counter_mask */
303 PIC_TIMER_HZ, /* frequency */
304 "XLRPIC", /* name */
305 2000, /* quality (adjusted in code) */
306 };
307 xlr_reg_t *mmio = xlr_io_mmio(XLR_IO_PIC_OFFSET);
308 int i, irq;
309
310 write_c0_eimr64(0ULL);
311 mtx_init(&xlr_pic_lock, "pic", NULL, MTX_SPIN);
312 xlr_write_reg(mmio, PIC_CTRL, 0);
313
314 /* Initialize all IRT entries */
315 for (i = 0; i < PIC_NUM_IRTS; i++) {
316 irq = PIC_INTR_TO_IRQ(i);
317
318 /*
319 * Disable all IRTs. Set defaults (local scheduling, high
320 * polarity, level * triggered, and CPU irq)
321 */
322 xlr_write_reg(mmio, PIC_IRT_1(i), (1 << 30) | (1 << 6) | irq);
323 /* Bind all PIC irqs to cpu 0 */
324 xlr_write_reg(mmio, PIC_IRT_0(i), 0x01);
325 }
326
327 /* Setup timer 7 of PIC as a timestamp, no interrupts */
328 pic_init_timer(PIC_CLOCK_TIMER);
329 pic_set_timer(PIC_CLOCK_TIMER, ~UINT64_C(0));
330 platform_timecounter = &pic_timecounter;
331 }
332
/*
 * Build phys_avail[]/dump_avail[] from the boot loader's memory map
 * and set physmem/realmem.  phys_avail[] is a zero-terminated list of
 * (start, end) physical address pairs; at most 7 pairs are filled
 * (the j == 14 guard).
 *
 * NOTE(review): the "j += 2" in the for-increment also executes when
 * an entry is skipped via continue (non-RAM type, or >4GB in 32-bit
 * mode), which leaves an all-zero pair mid-array, and the j == 0
 * branch assumes map entry 0 is RAM.  This matches how boot1 lays out
 * the map in practice — confirm before reusing with another loader.
 */
static void
xlr_mem_init(void)
{
	struct xlr_boot1_mem_map *boot_map;
	vm_size_t physsz = 0;
	int i, j;

	/* get physical memory info from boot loader */
	boot_map = (struct xlr_boot1_mem_map *)
	    (unsigned long)xlr_boot1_info.psb_mem_map;
	for (i = 0, j = 0; i < boot_map->num_entries; i++, j += 2) {
		if (boot_map->physmem_map[i].type != BOOT1_MEM_RAM)
			continue;
		if (j == 14) {
			printf("*** ERROR *** memory map too large ***\n");
			break;
		}
		if (j == 0) {
			/* start after kernel end */
			phys_avail[0] = (vm_paddr_t)
			    MIPS_KSEG0_TO_PHYS(&_end) + 0x20000;
			/* boot loader start */
			/* HACK to Use bootloaders memory region */
			if (boot_map->physmem_map[0].size == 0x0c000000) {
				boot_map->physmem_map[0].size = 0x0ff00000;
			}
			phys_avail[1] = boot_map->physmem_map[0].addr +
			    boot_map->physmem_map[0].size;
			printf("First segment: addr:%#jx -> %#jx \n",
			    (uintmax_t)phys_avail[0],
			    (uintmax_t)phys_avail[1]);

			dump_avail[0] = phys_avail[0];
			dump_avail[1] = phys_avail[1];
		} else {
#if !defined(__mips_n64) && !defined(__mips_n32) /* !PHYSADDR_64_BIT */
			/*
			 * In 32 bit physical address mode we cannot use
			 * mem > 0xffffffff
			 */
			if (boot_map->physmem_map[i].addr > 0xfffff000U) {
				printf("Memory: start %#jx size %#jx ignored"
				    "(>4GB)\n",
				    (intmax_t)boot_map->physmem_map[i].addr,
				    (intmax_t)boot_map->physmem_map[i].size);
				continue;
			}
			/* Clip a segment that straddles the 4GB boundary. */
			if (boot_map->physmem_map[i].addr +
			    boot_map->physmem_map[i].size > 0xfffff000U) {
				boot_map->physmem_map[i].size = 0xfffff000U -
				    boot_map->physmem_map[i].addr;
				printf("Memory: start %#jx limited to 4GB\n",
				    (intmax_t)boot_map->physmem_map[i].addr);
			}
#endif /* !PHYSADDR_64_BIT */
			phys_avail[j] = (vm_paddr_t)
			    boot_map->physmem_map[i].addr;
			phys_avail[j + 1] = phys_avail[j] +
			    boot_map->physmem_map[i].size;
			printf("Next segment : addr:%#jx -> %#jx\n",
			    (uintmax_t)phys_avail[j],
			    (uintmax_t)phys_avail[j+1]);
		}

		/* Dumps may use every segment made available above. */
		dump_avail[j] = phys_avail[j];
		dump_avail[j+1] = phys_avail[j+1];

		physsz += boot_map->physmem_map[i].size;
	}

	/* A zero pair terminates the phys_avail list. */
	phys_avail[j] = phys_avail[j + 1] = 0;
	realmem = physmem = btoc(physsz);
}
406
/*
 * Kernel entry point from the boot loader.  Parameters arrive in the
 * COP0 OS-scratch registers rather than in a0..a3 (hence __unused).
 * Sets up console, kenv, memory, MMU split and (on SMP) wakes the
 * application processors into mpwait.
 */
void
platform_start(__register_t a0 __unused,
    __register_t a1 __unused,
    __register_t a2 __unused,
    __register_t a3 __unused)
{
	int i;
#ifdef SMP
	uint32_t tmp;
	void (*wakeup) (void *, void *, unsigned int);
#endif

	/* Save boot loader and other stuff from scratch regs */
	xlr_boot1_info = *(struct boot1_info *)(intptr_t)(int)read_c0_register32(MIPS_COP_0_OSSCRATCH, 0);
	cpu_mask_info = read_c0_register64(MIPS_COP_0_OSSCRATCH, 1);
	xlr_online_cpumask = read_c0_register32(MIPS_COP_0_OSSCRATCH, 2);
	xlr_run_mode = read_c0_register32(MIPS_COP_0_OSSCRATCH, 3);
	xlr_argc = read_c0_register32(MIPS_COP_0_OSSCRATCH, 4);
	/*
	 * argv and envp are passed in array of 32bit pointers
	 */
	xlr_argv = (int32_t *)(intptr_t)(int)read_c0_register32(MIPS_COP_0_OSSCRATCH, 5);
	xlr_envp = (int32_t *)(intptr_t)(int)read_c0_register32(MIPS_COP_0_OSSCRATCH, 6);

	/* Initialize pcpu stuff */
	mips_pcpu0_init();

	/* initialize console so that we have printf */
	boothowto |= (RB_SERIAL | RB_MULTIPLE);	/* Use multiple consoles */

	/* clockrate used by delay, so initialize it here */
	cpu_clock = xlr_boot1_info.cpu_frequency / 1000000;

	/*
	 * Note the time counter on CPU0 runs not at system clock speed, but
	 * at PIC time counter speed (which is returned by
	 * platform_get_frequency(). Thus we do not use
	 * xlr_boot1_info.cpu_frequency here.
	 */
	mips_timer_early_init(xlr_boot1_info.cpu_frequency);

	/* Init console please */
	cninit();
	init_static_kenv(boot1_env, sizeof(boot1_env));
	/* Import the loader's "name=value" arguments into the static kenv. */
	printf("Environment (from %d args):\n", xlr_argc - 1);
	if (xlr_argc == 1)
		printf("\tNone\n");
	for (i = 1; i < xlr_argc; i++) {
		char *n, *arg;

		arg = (char *)(intptr_t)xlr_argv[i];
		printf("\t%s\n", arg);
		n = strsep(&arg, "=");
		/* A bare "name" with no '=' is stored as name=1. */
		if (arg == NULL)
			setenv(n, "1");
		else
			setenv(n, arg);
	}

	xlr_set_boot_flags();
	xlr_parse_mmu_options();

	xlr_mem_init();
	/* Set up hz, among others. */
	mips_init();

#ifdef SMP
	/*
	 * If thread 0 of any core is not available then mark whole core as
	 * not available
	 */
	tmp = xlr_boot1_info.cpu_online_map;
	for (i = 4; i < MAXCPU; i += 4) {
		if ((tmp & (0xf << i)) && !(tmp & (0x1 << i))) {
			/*
			 * Oops.. thread 0 is not available. Disable whole
			 * core
			 */
			tmp = tmp & ~(0xf << i);
			printf("WARNING: Core %d is disabled because thread 0"
			    " of this core is not enabled.\n", i / 4);
		}
	}
	xlr_boot1_info.cpu_online_map = tmp;

	/* Wakeup Other cpus, and put them in bsd park code. */
	wakeup = ((void (*) (void *, void *, unsigned int))
	    (unsigned long)(xlr_boot1_info.wakeup));
	printf("Waking up CPUs 0x%jx.\n",
	    (intmax_t)xlr_boot1_info.cpu_online_map & ~(0x1U));
	if (xlr_boot1_info.cpu_online_map & ~(0x1U))
		wakeup(mpwait, 0,
		    (unsigned int)xlr_boot1_info.cpu_online_map);
#endif

	/* xlr specific post initialization */
	/* initialize other on chip stuff */
	xlr_board_info_setup();
	xlr_msgring_config();
	xlr_pic_init();
	xlr_msgring_cpu_init();

	mips_timer_init_params(xlr_boot1_info.cpu_frequency, 0);

	printf("Platform specific startup now completes\n");
}
513
/*
 * Per-CPU platform hook run during CPU startup; XLR needs no extra
 * per-CPU work here.  Declared (void): the original empty parameter
 * list was an old-style non-prototype declaration.
 */
void
platform_cpu_init(void)
{
}
518
519 void
520 platform_reset(void)
521 {
522 xlr_reg_t *mmio = xlr_io_mmio(XLR_IO_GPIO_OFFSET);
523
524 /* write 1 to GPIO software reset register */
525 xlr_write_reg(mmio, 8, 1);
526 }
527
528 #ifdef SMP
/* Per-hw-thread release flags: an AP loops in mpwait until its slot is set to 1. */
int xlr_ap_release[MAXCPU];
530
531 int
532 platform_start_ap(int cpuid)
533 {
534 int hwid = xlr_cpuid_to_hwtid[cpuid];
535
536 if (xlr_boot1_info.cpu_online_map & (1<<hwid)) {
537 /*
538 * other cpus are enabled by the boot loader and they will be
539 * already looping in mpwait, release them
540 */
541 atomic_store_rel_int(&xlr_ap_release[hwid], 1);
542 return (0);
543 } else
544 return (-1);
545 }
546
/*
 * Per-CPU initialization run on each AP after it is released from
 * mpwait: program the core MMU split (thread 0 only), enable the
 * needed coprocessor access bits, and unmask this CPU's interrupts.
 */
void
platform_init_ap(int cpuid)
{
	uint32_t stat;

	/* The first thread has to setup the core MMU split */
	if (xlr_thr_id() == 0)
		xlr_setup_mmu_split();

	/* Setup interrupts for secondary CPUs here */
	stat = mips_rd_status();
	KASSERT((stat & MIPS_SR_INT_IE) == 0,
	    ("Interrupts enabled in %s!", __func__));
	/* Enable COP0 and COP2 access (COP2 is used by the RMI extensions — NOTE(review): confirm). */
	stat |= MIPS_SR_COP_2_BIT | MIPS_SR_COP_0_BIT;
	mips_wr_status(stat);

	/* Mask everything, then unmask only the IRQs this CPU handles. */
	write_c0_eimr64(0ULL);
	xlr_enable_irq(IRQ_IPI);
	xlr_enable_irq(IRQ_TIMER);
	/* Message ring per-core init is done once per core, by thread 0. */
	if (xlr_thr_id() == 0)
		xlr_msgring_cpu_init();
	xlr_enable_irq(IRQ_MSGRING);

	return;
}
572
/* Return the interrupt number used for inter-processor interrupts. */
int
platform_ipi_intrnum(void)
{

	return (IRQ_IPI);
}
579
/*
 * Send an IPI to the given FreeBSD cpuid: translate it to the hardware
 * thread id and post the IPI interrupt through the PIC.
 */
void
platform_ipi_send(int cpuid)
{

	pic_send_ipi(xlr_cpuid_to_hwtid[cpuid], platform_ipi_intrnum());
}
586
/*
 * IPI acknowledgment hook.  Nothing to do here; presumably the IPI is
 * acked in the PIC interrupt path — NOTE(review): verify.
 */
void
platform_ipi_clear(void)
{
}
591
/* Return the FreeBSD cpuid of the hardware thread we are running on. */
int
platform_processor_id(void)
{

	return (xlr_hwtid_to_cpuid[xlr_cpu_id()]);
}
598
599 void
600 platform_cpu_mask(cpuset_t *mask)
601 {
602 int i, s;
603
604 CPU_ZERO(mask);
605 s = xlr_ncores * xlr_threads_per_core;
606 for (i = 0; i < s; i++)
607 CPU_SET(i, mask);
608 }
609
610 struct cpu_group *
611 platform_smp_topo()
612 {
613
614 return (smp_topo_2level(CG_SHARE_L2, xlr_ncores, CG_SHARE_L1,
615 xlr_threads_per_core, CG_FLAG_THREAD));
616 }
617 #endif
/* Cache object: b569555312335743bae764b1c154da72 */