/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2015-2017 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * Portions of this software were developed by SRI International and the
 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
 * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by the University of Cambridge
 * Computer Laboratory as part of the CTSRD Project, with support from the
 * UK Higher Education Innovation Fund (HEIF).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/tslog.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>

#include <machine/riscvreg.h>
#include <machine/cpu.h>
#include <machine/kdb.h>
#include <machine/machdep.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/intr.h>
#include <machine/sbi.h>

#include <machine/asm.h>

#ifdef FPE
#include <machine/fpe.h>
#endif

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif
static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static void set_fpcontext(struct thread *td, mcontext_t *mcp);

struct pcpu __pcpu[MAXCPU];

static struct trapframe proc0_tf;

vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2];
vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2];

int early_boot = 1;
int cold = 1;
long realmem = 0;
long Maxmem = 0;

#define	DTB_SIZE_MAX	(1024 * 1024)

#define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))
vm_paddr_t physmap[PHYSMAP_SIZE];
u_int physmap_idx;

struct kva_md_info kmi;

int64_t dcache_line_size;	/* The minimum D cache line size */
int64_t icache_line_size;	/* The minimum I cache line size */
int64_t idcache_line_size;	/* The minimum cache line size */

uint32_t boot_hart;	/* The hart we booted on. */
cpuset_t all_harts;

extern int *end;

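/*
 * Late boot-time CPU setup, run via SYSINIT(SI_SUB_CPU) below: report the
 * SBI version and CPU details, print the physical memory layout, and bring
 * up the kernel submaps and the buffer system.
 */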
static void
cpu_startup(void *dummy)
{

	sbi_print_version();
	identify_cpu();

	printf("real memory  = %ju (%ju MB)\n", ptoa((uintmax_t)realmem),
	    ptoa((uintmax_t)realmem) / (1024 * 1024));

	/*
	 * When booting verbose, display the physical memory chunks and any
	 * holes between them.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_free_count()),
	    ptoa((uintmax_t)vm_free_count()) / (1024 * 1024));

	bufinit();
	vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

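/*
 * Copy the general-purpose register state out of a thread's trapframe,
 * e.g. for ptrace(2) or core dumps; set_regs() below is the inverse.
 */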
int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	regs->sepc = frame->tf_sepc;
	regs->sstatus = frame->tf_sstatus;
	regs->ra = frame->tf_ra;
	regs->sp = frame->tf_sp;
	regs->gp = frame->tf_gp;
	regs->tp = frame->tf_tp;

	memcpy(regs->t, frame->tf_t, sizeof(regs->t));
	memcpy(regs->s, frame->tf_s, sizeof(regs->s));
	memcpy(regs->a, frame->tf_a, sizeof(regs->a));

	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	frame->tf_sepc = regs->sepc;
	frame->tf_ra = regs->ra;
	frame->tf_sp = regs->sp;
	frame->tf_gp = regs->gp;
	frame->tf_tp = regs->tp;

	memcpy(frame->tf_t, regs->t, sizeof(frame->tf_t));
	memcpy(frame->tf_s, regs->s, sizeof(frame->tf_s));
	memcpy(frame->tf_a, regs->a, sizeof(frame->tf_a));

	return (0);
}

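/*
 * Copy out the floating-point register state.  When the FPE is compiled in
 * and the thread has used it, flush the live state to the PCB first;
 * otherwise report an all-zero register set.
 */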
int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef FPE
	struct pcb *pcb;

	pcb = td->td_pcb;

	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running FPE instructions we will
		 * need to save the state to memcpy it below.
		 */
		if (td == curthread)
			fpe_state_save(td);

		memcpy(regs->fp_x, pcb->pcb_x, sizeof(regs->fp_x));
		regs->fp_fcsr = pcb->pcb_fcsr;
	} else
#endif
		memset(regs, 0, sizeof(*regs));

	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef FPE
	struct trapframe *frame;
	struct pcb *pcb;

	frame = td->td_frame;
	pcb = td->td_pcb;

	memcpy(pcb->pcb_x, regs->fp_x, sizeof(regs->fp_x));
	pcb->pcb_fcsr = regs->fp_fcsr;
	pcb->pcb_fpflags |= PCB_FP_STARTED;
	frame->tf_sstatus &= ~SSTATUS_FS_MASK;
	frame->tf_sstatus |= SSTATUS_FS_CLEAN;
#endif

	return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

	panic("fill_dbregs");
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{

	panic("set_dbregs");
}

int
ptrace_set_pc(struct thread *td, u_long addr)
{

	panic("ptrace_set_pc");
	return (0);
}

int
ptrace_single_step(struct thread *td)
{

	/* TODO */
	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{

	/* TODO */
	return (0);
}

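/*
 * Reset the register state for return to a freshly exec'd image: clear the
 * trapframe, point sepc at the image entry point, and mark the FPE as
 * unused so it starts off for the new image.
 */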
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf;
	struct pcb *pcb;

	tf = td->td_frame;
	pcb = td->td_pcb;

	memset(tf, 0, sizeof(struct trapframe));

	tf->tf_a[0] = stack;
	tf->tf_sp = STACKALIGN(stack);
	tf->tf_ra = imgp->entry_addr;
	tf->tf_sepc = imgp->entry_addr;

	pcb->pcb_fpflags &= ~PCB_FP_STARTED;
}

/*
 * Sanity check that these are the same size; they will be memcpy'd to
 * and fro.
 */
CTASSERT(sizeof(((struct trapframe *)0)->tf_a) ==
    sizeof((struct gpregs *)0)->gp_a);
CTASSERT(sizeof(((struct trapframe *)0)->tf_s) ==
    sizeof((struct gpregs *)0)->gp_s);
CTASSERT(sizeof(((struct trapframe *)0)->tf_t) ==
    sizeof((struct gpregs *)0)->gp_t);
CTASSERT(sizeof(((struct trapframe *)0)->tf_a) ==
    sizeof((struct reg *)0)->a);
CTASSERT(sizeof(((struct trapframe *)0)->tf_s) ==
    sizeof((struct reg *)0)->s);
CTASSERT(sizeof(((struct trapframe *)0)->tf_t) ==
    sizeof((struct reg *)0)->t);

/* Support for FDT configurations only. */
CTASSERT(FDT);

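/*
 * Capture the current thread's machine context (GP and FP register state),
 * e.g. for getcontext(2) or signal delivery.  GET_MC_CLEAR_RET additionally
 * zeroes a0 and t0 so that a context resumed later sees a successful (zero)
 * syscall return.
 */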
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;

	memcpy(mcp->mc_gpregs.gp_t, tf->tf_t, sizeof(mcp->mc_gpregs.gp_t));
	memcpy(mcp->mc_gpregs.gp_s, tf->tf_s, sizeof(mcp->mc_gpregs.gp_s));
	memcpy(mcp->mc_gpregs.gp_a, tf->tf_a, sizeof(mcp->mc_gpregs.gp_a));

	if (clear_ret & GET_MC_CLEAR_RET) {
		mcp->mc_gpregs.gp_a[0] = 0;
		mcp->mc_gpregs.gp_t[0] = 0; /* clear syscall error */
	}

	mcp->mc_gpregs.gp_ra = tf->tf_ra;
	mcp->mc_gpregs.gp_sp = tf->tf_sp;
	mcp->mc_gpregs.gp_gp = tf->tf_gp;
	mcp->mc_gpregs.gp_tp = tf->tf_tp;
	mcp->mc_gpregs.gp_sepc = tf->tf_sepc;
	mcp->mc_gpregs.gp_sstatus = tf->tf_sstatus;
	get_fpcontext(td, mcp);

	return (0);
}

int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tf;

	tf = td->td_frame;

	/*
	 * Permit changes to the USTATUS bits of SSTATUS.
	 *
	 * Ignore writes to read-only bits (SD, XS).
	 *
	 * Ignore writes to the FS field as set_fpcontext() will set
	 * it explicitly.
	 */
	if (((mcp->mc_gpregs.gp_sstatus ^ tf->tf_sstatus) &
	    ~(SSTATUS_SD | SSTATUS_XS_MASK | SSTATUS_FS_MASK | SSTATUS_UPIE |
	    SSTATUS_UIE)) != 0)
		return (EINVAL);

	memcpy(tf->tf_t, mcp->mc_gpregs.gp_t, sizeof(tf->tf_t));
	memcpy(tf->tf_s, mcp->mc_gpregs.gp_s, sizeof(tf->tf_s));
	memcpy(tf->tf_a, mcp->mc_gpregs.gp_a, sizeof(tf->tf_a));

	tf->tf_ra = mcp->mc_gpregs.gp_ra;
	tf->tf_sp = mcp->mc_gpregs.gp_sp;
	tf->tf_gp = mcp->mc_gpregs.gp_gp;
	tf->tf_sepc = mcp->mc_gpregs.gp_sepc;
	tf->tf_sstatus = mcp->mc_gpregs.gp_sstatus;
	set_fpcontext(td, mcp);

	return (0);
}

static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef FPE
	struct pcb *curpcb;

	critical_enter();

	curpcb = curthread->td_pcb;

	KASSERT(td->td_pcb == curpcb, ("Invalid fpe pcb"));

	if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running FPE instructions we will
		 * need to save the state to memcpy it below.
		 */
		fpe_state_save(td);

		KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
		    ("Non-userspace FPE flags set in get_fpcontext"));
		memcpy(mcp->mc_fpregs.fp_x, curpcb->pcb_x,
		    sizeof(mcp->mc_fpregs.fp_x));
		mcp->mc_fpregs.fp_fcsr = curpcb->pcb_fcsr;
		mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
		mcp->mc_flags |= _MC_FP_VALID;
	}

	critical_exit();
#endif
}

static void
set_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef FPE
	struct pcb *curpcb;
#endif

	td->td_frame->tf_sstatus &= ~SSTATUS_FS_MASK;
	td->td_frame->tf_sstatus |= SSTATUS_FS_OFF;

#ifdef FPE
	critical_enter();

	if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
		curpcb = curthread->td_pcb;
		/* The mcontext's FPE state is valid; install it. */
		memcpy(curpcb->pcb_x, mcp->mc_fpregs.fp_x,
		    sizeof(mcp->mc_fpregs.fp_x));
		curpcb->pcb_fcsr = mcp->mc_fpregs.fp_fcsr;
		curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK;
		td->td_frame->tf_sstatus |= SSTATUS_FS_CLEAN;
	}

	critical_exit();
#endif
}

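/*
 * Idle the CPU: if nothing is runnable, execute wfi to stall until the
 * next interrupt.  The fence ahead of wfi is presumably there to make any
 * prior stores (e.g. run queue updates) visible before the hart suspends.
 */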
void
cpu_idle(int busy)
{

	spinlock_enter();
	if (!busy)
		cpu_idleclock();
	if (!sched_runnable())
		__asm __volatile(
		    "fence \n"
		    "wfi \n");
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
}

void
cpu_halt(void)
{

	/*
	 * Try to power down using the HSM SBI extension and fall back to a
	 * simple wfi loop.
	 */
	intr_disable();
	if (sbi_probe_extension(SBI_EXT_ID_HSM) != 0)
		sbi_hsm_hart_stop();
	for (;;)
		__asm __volatile("wfi");
	/* NOTREACHED */
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* TBD */
}

/* Get current clock frequency for the given CPU ID. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	panic("cpu_est_clockrate");
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}

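/*
 * Spinlock sections nest: the outermost spinlock_enter() disables
 * interrupts and saves the previous sstatus interrupt-enable state, and
 * the matching outermost spinlock_exit() restores it.
 */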
void
spinlock_enter(void)
{
	struct thread *td;
	register_t reg;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		reg = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_sstatus_ie = reg;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t sstatus_ie;

	td = curthread;
	critical_exit();
	sstatus_ie = td->td_md.md_saved_sstatus_ie;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(sstatus_ie);
}

#ifndef _SYS_SYSPROTO_H_
struct sigreturn_args {
	ucontext_t *sigcntxp;
};
#endif

int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	int error;

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);

	error = set_mcontext(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	memcpy(pcb->pcb_s, tf->tf_s, sizeof(tf->tf_s));

	pcb->pcb_ra = tf->tf_sepc;
	pcb->pcb_sp = tf->tf_sp;
	pcb->pcb_gp = tf->tf_gp;
	pcb->pcb_tp = tf->tf_tp;
}

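/*
 * Deliver a signal: build a sigframe (machine context, siginfo and signal
 * mask) on the user stack, or on the alternate signal stack if one is
 * active, then point the thread at the handler with the return address
 * set to the signal trampoline.
 */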
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe *fp, frame;
	struct sysentvec *sysent;
	struct trapframe *tf;
	struct sigacts *psp;
	struct thread *td;
	struct proc *p;
	int onstack;
	int sig;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	tf = td->td_frame;
	onstack = sigonstack(tf->tf_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
	} else {
		fp = (struct sigframe *)td->td_frame->tf_sp;
	}

	/* Make room, keeping the stack aligned */
	fp--;
	fp = (struct sigframe *)STACKALIGN(fp);

	/* Fill in the frame to copy out */
	bzero(&frame, sizeof(frame));
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
	    ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	tf->tf_a[0] = sig;
	tf->tf_a[1] = (register_t)&fp->sf_si;
	tf->tf_a[2] = (register_t)&fp->sf_uc;

	tf->tf_sepc = (register_t)catcher;
	tf->tf_sp = (register_t)fp;

	sysent = p->p_sysent;
	if (sysent->sv_sigcode_base != 0)
		tf->tf_ra = (register_t)sysent->sv_sigcode_base;
	else
		tf->tf_ra = (register_t)(sysent->sv_psstrings -
		    *(sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_sepc,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

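/*
 * Hook up proc0 and thread0: link them together and carve thread0's PCB
 * out of the top of the supplied kernel stack.
 */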
static void
init_proc0(vm_offset_t kstack)
{
	struct pcpu *pcpup;

	pcpup = &__pcpu[0];

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_kstack_pages = KSTACK_PAGES;
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
	    thread0.td_kstack_pages * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_fpflags = 0;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}

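/*
 * Insert the region [base, base + length) into the physmap array, which
 * holds (start, end) pairs sorted by address, coalescing with a neighbour
 * when the regions abut.  For example (addresses hypothetical),
 *
 *	physmap[] = { 0x80200000, 0x88000000, 0xc0000000, 0xc8000000 }
 *
 * describes two regions of 126MB and 128MB.  Returns 0 if the array is
 * full and the entry could not be added, 1 otherwise.
 */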
static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
	u_int i, insert_idx, _physmap_idx;

	_physmap_idx = *physmap_idxp;

	if (length == 0)
		return (1);

	/*
	 * Find insertion point while checking for overlap.  Start off by
	 * assuming the new entry will be added to the end.
	 */
	insert_idx = _physmap_idx;
	for (i = 0; i <= _physmap_idx; i += 2) {
		if (base < physmap[i + 1]) {
			if (base + length <= physmap[i]) {
				insert_idx = i;
				break;
			}
			if (boothowto & RB_VERBOSE)
				printf(
		    "Overlapping memory regions, ignoring second region\n");
			return (1);
		}
	}

	/* See if we can prepend to the next entry. */
	if (insert_idx <= _physmap_idx &&
	    base + length == physmap[insert_idx]) {
		physmap[insert_idx] = base;
		return (1);
	}

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += length;
		return (1);
	}

	_physmap_idx += 2;
	*physmap_idxp = _physmap_idx;
	if (_physmap_idx == PHYSMAP_SIZE) {
		printf(
		"Too many segments in the physical address map, giving up\n");
		return (0);
	}

	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = _physmap_idx; i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];
	}

	/* Insert the new entry. */
	physmap[insert_idx] = base;
	physmap[insert_idx + 1] = base + length;

	printf("physmap[%d] = 0x%016lx\n", insert_idx, base);
	printf("physmap[%d] = 0x%016lx\n", insert_idx + 1, base + length);
	return (1);
}

#ifdef FDT
static void
try_load_dtb(caddr_t kmdp, vm_offset_t dtbp)
{

#if defined(FDT_DTB_STATIC)
	dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (dtbp == (vm_offset_t)NULL) {
		printf("ERROR loading DTB\n");
		return;
	}

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");
}
#endif

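/*
 * Cache line sizes are left at zero (unknown) until a way to discover
 * them is implemented.
 */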
static void
cache_setup(void)
{

	/* TODO */

	dcache_line_size = 0;
	icache_line_size = 0;
	idcache_line_size = 0;
}

/*
 * Fake up a boot descriptor table.
 * RISCVTODO: This needs to be done via loader (when it's available).
 */
vm_offset_t
fake_preload_metadata(struct riscv_bootparams *rvbp __unused)
{
	static uint32_t fake_preload[35];
#ifdef DDB
	vm_offset_t zstart = 0, zend = 0;
#endif
	vm_offset_t lastaddr;
	int i;

	i = 0;

	fake_preload[i++] = MODINFO_NAME;
	fake_preload[i++] = strlen("kernel") + 1;
	strcpy((char *)&fake_preload[i++], "kernel");
	i += 1;
	fake_preload[i++] = MODINFO_TYPE;
	fake_preload[i++] = strlen("elf64 kernel") + 1;
	strcpy((char *)&fake_preload[i++], "elf64 kernel");
	i += 3;
	fake_preload[i++] = MODINFO_ADDR;
	fake_preload[i++] = sizeof(vm_offset_t);
	*(vm_offset_t *)&fake_preload[i++] =
	    (vm_offset_t)(KERNBASE + KERNENTRY);
	i += 1;
	fake_preload[i++] = MODINFO_SIZE;
	fake_preload[i++] = sizeof(vm_offset_t);
	fake_preload[i++] = (vm_offset_t)&end -
	    (vm_offset_t)(KERNBASE + KERNENTRY);
	i += 1;
#ifdef DDB
#if 0
	/* RISCVTODO */
	if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) {
		fake_preload[i++] = MODINFO_METADATA | MODINFOMD_SSYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4);
		fake_preload[i++] = MODINFO_METADATA | MODINFOMD_ESYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8);
		lastaddr = *(uint32_t *)(KERNVIRTADDR + 8);
		zend = lastaddr;
		zstart = *(uint32_t *)(KERNVIRTADDR + 4);
		db_fetch_ksymtab(zstart, zend);
	} else
#endif
#endif
	lastaddr = (vm_offset_t)&end;
	fake_preload[i++] = 0;
	fake_preload[i] = 0;
	preload_metadata = (void *)fake_preload;

	return (lastaddr);
}

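/*
 * Early machine-dependent initialization, called from the locore startup
 * code with the boot parameters: set up pcpu and the SBI, fake up loader
 * metadata, take the physical memory layout from the FDT (excluding the
 * DTB itself), bootstrap the pmap, and initialize the console, proc0 and
 * the message buffer.
 */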
void
initriscv(struct riscv_bootparams *rvbp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	struct pcpu *pcpup;
	vm_offset_t rstart, rend;
	vm_offset_t s, e;
	int mem_regions_sz;
	vm_offset_t lastaddr;
	vm_size_t kernlen;
	caddr_t kmdp;
	int i;

	TSRAW(&thread0, TS_ENTER, __func__, NULL);

	/* Set up the pcpu data; this is needed by pmap_bootstrap. */
	pcpup = &__pcpu[0];
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	pcpup->pc_hart = boot_hart;

	/* Set the pcpu pointer */
	__asm __volatile("mv tp, %0" :: "r"(pcpup));

	PCPU_SET(curthread, &thread0);

	/* Initialize SBI interface. */
	sbi_init();

	/* Set the module data location */
	lastaddr = fake_preload_metadata(rvbp);

	/* Find the kernel address */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	/* RISCVTODO: take boothowto from the loader once one is available. */
	boothowto = RB_VERBOSE;

	kern_envp = NULL;

#ifdef FDT
	try_load_dtb(kmdp, rvbp->dtbp_virt);
#endif

	/* Load the physical memory ranges */
	physmap_idx = 0;

#ifdef FDT
	/* Grab physical memory regions information from device tree. */
	if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, NULL) != 0)
		panic("Cannot get physical memory regions");

	s = rvbp->dtbp_phys;
	e = s + DTB_SIZE_MAX;

	for (i = 0; i < mem_regions_sz; i++) {
		rstart = mem_regions[i].mr_start;
		rend = (mem_regions[i].mr_start + mem_regions[i].mr_size);

		if ((rstart < s) && (rend > e)) {
			/* Exclude DTB region. */
			add_physmap_entry(rstart, (s - rstart), physmap,
			    &physmap_idx);
			add_physmap_entry(e, (rend - e), physmap,
			    &physmap_idx);
		} else {
			add_physmap_entry(mem_regions[i].mr_start,
			    mem_regions[i].mr_size, physmap, &physmap_idx);
		}
	}
#endif

	/* Do basic tuning, hz etc */
	init_param1();

	cache_setup();

	/* Bootstrap enough of pmap to enter the kernel proper */
	kernlen = (lastaddr - KERNBASE);
	pmap_bootstrap(rvbp->kern_l1pt, mem_regions[0].mr_start, kernlen);

	cninit();

	init_proc0(rvbp->kern_stack);

	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	init_param2(physmem);
	kdb_init();

	early_boot = 0;

	TSEXIT();
}

#undef bzero
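/*
 * Trivial byte-at-a-time bzero.  The #undef above discards any macro
 * definition of bzero so that this out-of-line version is the one
 * defined here.
 */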
void
bzero(void *buf, size_t len)
{
	uint8_t *p;

	p = buf;
	while (len-- > 0)
		*p++ = 0;
}