FreeBSD/Linux Kernel Cross Reference
sys/arm64/arm64/machdep.c


    1 /*-
    2  * Copyright (c) 2014 Andrew Turner
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  *
   26  */
   27 
   28 #include "opt_platform.h"
   29 #include "opt_ddb.h"
   30 
   31 #include <sys/cdefs.h>
   32 __FBSDID("$FreeBSD: releng/11.0/sys/arm64/arm64/machdep.c 298627 2016-04-26 11:53:37Z br $");
   33 
   34 #include <sys/param.h>
   35 #include <sys/systm.h>
   36 #include <sys/buf.h>
   37 #include <sys/bus.h>
   38 #include <sys/cons.h>
   39 #include <sys/cpu.h>
   40 #include <sys/devmap.h>
   41 #include <sys/efi.h>
   42 #include <sys/exec.h>
   43 #include <sys/imgact.h>
   44 #include <sys/kdb.h> 
   45 #include <sys/kernel.h>
   46 #include <sys/limits.h>
   47 #include <sys/linker.h>
   48 #include <sys/msgbuf.h>
   49 #include <sys/pcpu.h>
   50 #include <sys/proc.h>
   51 #include <sys/ptrace.h>
   52 #include <sys/reboot.h>
   53 #include <sys/rwlock.h>
   54 #include <sys/sched.h>
   55 #include <sys/signalvar.h>
   56 #include <sys/syscallsubr.h>
   57 #include <sys/sysent.h>
   58 #include <sys/sysproto.h>
   59 #include <sys/ucontext.h>
   60 #include <sys/vdso.h>
   61 
   62 #include <vm/vm.h>
   63 #include <vm/vm_kern.h>
   64 #include <vm/vm_object.h>
   65 #include <vm/vm_page.h>
   66 #include <vm/pmap.h>
   67 #include <vm/vm_map.h>
   68 #include <vm/vm_pager.h>
   69 
   70 #include <machine/armreg.h>
   71 #include <machine/cpu.h>
   72 #include <machine/debug_monitor.h>
   73 #include <machine/kdb.h>
   74 #include <machine/machdep.h>
   75 #include <machine/metadata.h>
   76 #include <machine/md_var.h>
   77 #include <machine/pcb.h>
   78 #include <machine/reg.h>
   79 #include <machine/vmparam.h>
   80 
   81 #ifdef VFP
   82 #include <machine/vfp.h>
   83 #endif
   84 
   85 #ifdef FDT
   86 #include <dev/fdt/fdt_common.h>
   87 #include <dev/ofw/openfirm.h>
   88 #endif
   89 
   90 struct pcpu __pcpu[MAXCPU];
   91 
   92 static struct trapframe proc0_tf;
   93 
   94 vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2];
   95 vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2];
   96 
   97 int early_boot = 1;
   98 int cold = 1;
   99 long realmem = 0;
  100 long Maxmem = 0;
  101 
  102 #define PHYSMAP_SIZE    (2 * (VM_PHYSSEG_MAX - 1))
  103 vm_paddr_t physmap[PHYSMAP_SIZE];
  104 u_int physmap_idx;
  105 
  106 struct kva_md_info kmi;
  107 
  108 int64_t dcache_line_size;       /* The minimum D cache line size */
  109 int64_t icache_line_size;       /* The minimum I cache line size */
  110 int64_t idcache_line_size;      /* The minimum cache line size */
  111 int64_t dczva_line_size;        /* The size of cache line the dc zva zeroes */
  112 
  113 /* pagezero_* implementations are provided in support.S */
  114 void pagezero_simple(void *);
  115 void pagezero_cache(void *);
  116 
  117 /* pagezero_simple is the default pagezero implementation */
  118 void (*pagezero)(void *p) = pagezero_simple;
  119 
  120 static void
  121 cpu_startup(void *dummy)
  122 {
  123 
  124         identify_cpu();
  125 
  126         vm_ksubmap_init(&kmi);
  127         bufinit();
  128         vm_pager_bufferinit();
  129 }
  130 
  131 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
  132 
  133 int
  134 cpu_idle_wakeup(int cpu)
  135 {
  136 
  137         return (0);
  138 }
  139 
  140 int
  141 fill_regs(struct thread *td, struct reg *regs)
  142 {
  143         struct trapframe *frame;
  144 
  145         frame = td->td_frame;
  146         regs->sp = frame->tf_sp;
  147         regs->lr = frame->tf_lr;
  148         regs->elr = frame->tf_elr;
  149         regs->spsr = frame->tf_spsr;
  150 
  151         memcpy(regs->x, frame->tf_x, sizeof(regs->x));
  152 
  153         return (0);
  154 }
  155 
  156 int
  157 set_regs(struct thread *td, struct reg *regs)
  158 {
  159         struct trapframe *frame;
  160 
  161         frame = td->td_frame;
  162         frame->tf_sp = regs->sp;
  163         frame->tf_lr = regs->lr;
  164         frame->tf_elr = regs->elr;
  165         frame->tf_spsr = regs->spsr;
  166 
  167         memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));
  168 
  169         return (0);
  170 }
  171 
  172 int
  173 fill_fpregs(struct thread *td, struct fpreg *regs)
  174 {
  175 #ifdef VFP
  176         struct pcb *pcb;
  177 
  178         pcb = td->td_pcb;
  179         if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
  180                 /*
  181                  * If we have just been running VFP instructions we will
  182                  * need to save the state to memcpy it below.
  183                  */
  184                 vfp_save_state(td, pcb);
  185 
  186                 memcpy(regs->fp_q, pcb->pcb_vfp, sizeof(regs->fp_q));
  187                 regs->fp_cr = pcb->pcb_fpcr;
  188                 regs->fp_sr = pcb->pcb_fpsr;
  189         } else
  190 #endif
  191                 memset(regs->fp_q, 0, sizeof(regs->fp_q));
  192         return (0);
  193 }
  194 
  195 int
  196 set_fpregs(struct thread *td, struct fpreg *regs)
  197 {
  198 #ifdef VFP
  199         struct pcb *pcb;
  200 
  201         pcb = td->td_pcb;
  202         memcpy(pcb->pcb_vfp, regs->fp_q, sizeof(regs->fp_q));
  203         pcb->pcb_fpcr = regs->fp_cr;
  204         pcb->pcb_fpsr = regs->fp_sr;
  205 #endif
  206         return (0);
  207 }
  208 
  209 int
  210 fill_dbregs(struct thread *td, struct dbreg *regs)
  211 {
  212 
  213         panic("ARM64TODO: fill_dbregs");
  214 }
  215 
  216 int
  217 set_dbregs(struct thread *td, struct dbreg *regs)
  218 {
  219 
  220         panic("ARM64TODO: set_dbregs");
  221 }
  222 
  223 int
  224 ptrace_set_pc(struct thread *td, u_long addr)
  225 {
  226 
  227         panic("ARM64TODO: ptrace_set_pc");
  228         return (0);
  229 }
  230 
  231 int
  232 ptrace_single_step(struct thread *td)
  233 {
  234 
  235         td->td_frame->tf_spsr |= PSR_SS;
  236         td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
  237         return (0);
  238 }
  239 
  240 int
  241 ptrace_clear_single_step(struct thread *td)
  242 {
  243 
  244         td->td_frame->tf_spsr &= ~PSR_SS;
  245         td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
  246         return (0);
  247 }
  248 
  249 void
  250 exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
  251 {
  252         struct trapframe *tf = td->td_frame;
  253 
  254         memset(tf, 0, sizeof(struct trapframe));
  255 
  256         /*
  257          * We need to set x0 for init as it doesn't call
  258          * cpu_set_syscall_retval to copy the value. We also
  259          * need to set td_retval for the cases where we do.
  260          */
  261         tf->tf_x[0] = td->td_retval[0] = stack;
  262         tf->tf_sp = STACKALIGN(stack);
  263         tf->tf_lr = imgp->entry_addr;
  264         tf->tf_elr = imgp->entry_addr;
  265 }
  266 
  267 /* Sanity check that these are the same size; they will be memcpy'd to and fro */
  268 CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
  269     sizeof((struct gpregs *)0)->gp_x);
  270 CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
  271     sizeof((struct reg *)0)->x);
  272 
  273 int
  274 get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
  275 {
  276         struct trapframe *tf = td->td_frame;
  277 
  278         if (clear_ret & GET_MC_CLEAR_RET) {
  279                 mcp->mc_gpregs.gp_x[0] = 0;
  280                 mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
  281         } else {
  282                 mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
  283                 mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
  284         }
  285 
  286         memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
  287             sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));
  288 
  289         mcp->mc_gpregs.gp_sp = tf->tf_sp;
  290         mcp->mc_gpregs.gp_lr = tf->tf_lr;
  291         mcp->mc_gpregs.gp_elr = tf->tf_elr;
  292 
  293         return (0);
  294 }
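      /*
       * Editorial note (not part of the original source): on FreeBSD/arm64
       * the carry flag in SPSR is used by the system-call return path to
       * report an error to the libc wrapper.  Clearing PSR_C together with
       * x0 when GET_MC_CLEAR_RET is set therefore makes a context captured
       * by getcontext(2) look like a successful "return 0" when it is later
       * resumed.
       */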
  295 
  296 int
  297 set_mcontext(struct thread *td, mcontext_t *mcp)
  298 {
  299         struct trapframe *tf = td->td_frame;
  300 
  301         memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));
  302 
  303         tf->tf_sp = mcp->mc_gpregs.gp_sp;
  304         tf->tf_lr = mcp->mc_gpregs.gp_lr;
  305         tf->tf_elr = mcp->mc_gpregs.gp_elr;
  306         tf->tf_spsr = mcp->mc_gpregs.gp_spsr;
  307 
  308         return (0);
  309 }
  310 
  311 static void
  312 get_fpcontext(struct thread *td, mcontext_t *mcp)
  313 {
  314 #ifdef VFP
  315         struct pcb *curpcb;
  316 
  317         critical_enter();
  318 
  319         curpcb = curthread->td_pcb;
  320 
  321         if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
  322                 /*
  323                  * If we have just been running VFP instructions we will
  324                  * need to save the state to memcpy it below.
  325                  */
  326                 vfp_save_state(td, curpcb);
  327 
  328                 memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_vfp,
  329                     sizeof(mcp->mc_fpregs.fp_q));
  330                 mcp->mc_fpregs.fp_cr = curpcb->pcb_fpcr;
  331                 mcp->mc_fpregs.fp_sr = curpcb->pcb_fpsr;
  332                 mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
  333                 mcp->mc_flags |= _MC_FP_VALID;
  334         }
  335 
  336         critical_exit();
  337 #endif
  338 }
  339 
  340 static void
  341 set_fpcontext(struct thread *td, mcontext_t *mcp)
  342 {
  343 #ifdef VFP
  344         struct pcb *curpcb;
  345 
  346         critical_enter();
  347 
  348         if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
  349                 curpcb = curthread->td_pcb;
  350 
  351                 /*
  352                  * Discard any VFP state for the current thread; we
  353                  * are about to overwrite it.
  354                  */
  355                 vfp_discard(td);
  356 
  357                 memcpy(curpcb->pcb_vfp, mcp->mc_fpregs.fp_q,
  358                     sizeof(mcp->mc_fpregs.fp_q));
  359                 curpcb->pcb_fpcr = mcp->mc_fpregs.fp_cr;
  360                 curpcb->pcb_fpsr = mcp->mc_fpregs.fp_sr;
  361                 curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags;
  362         }
  363 
  364         critical_exit();
  365 #endif
  366 }
  367 
  368 void
  369 cpu_idle(int busy)
  370 {
  371 
  372         spinlock_enter();
  373         if (!busy)
  374                 cpu_idleclock();
  375         if (!sched_runnable())
  376                 __asm __volatile(
  377                     "dsb sy \n"
  378                     "wfi    \n");
  379         if (!busy)
  380                 cpu_activeclock();
  381         spinlock_exit();
  382 }
  383 
  384 void
  385 cpu_halt(void)
  386 {
  387 
  388         /* We should have shut down by now; if not, enter a low-power sleep. */
  389         intr_disable();
  390         while (1) {
  391                 __asm __volatile("wfi");
  392         }
  393 }
  394 
  395 /*
  396  * Flush the D-cache for non-DMA I/O so that the I-cache can
  397  * be made coherent later.
  398  */
  399 void
  400 cpu_flush_dcache(void *ptr, size_t len)
  401 {
  402 
  403         /* ARM64TODO TBD */
  404 }
  405 
  406 /* Get current clock frequency for the given CPU ID. */
  407 int
  408 cpu_est_clockrate(int cpu_id, uint64_t *rate)
  409 {
  410 
  411         panic("ARM64TODO: cpu_est_clockrate");
  412 }
  413 
  414 void
  415 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
  416 {
  417 
  418         pcpu->pc_acpi_id = 0xffffffff;
  419 }
  420 
  421 void
  422 spinlock_enter(void)
  423 {
  424         struct thread *td;
  425         register_t daif;
  426 
  427         td = curthread;
  428         if (td->td_md.md_spinlock_count == 0) {
  429                 daif = intr_disable();
  430                 td->td_md.md_spinlock_count = 1;
  431                 td->td_md.md_saved_daif = daif;
  432         } else
  433                 td->td_md.md_spinlock_count++;
  434         critical_enter();
  435 }
  436 
  437 void
  438 spinlock_exit(void)
  439 {
  440         struct thread *td;
  441         register_t daif;
  442 
  443         td = curthread;
  444         critical_exit();
  445         daif = td->td_md.md_saved_daif;
  446         td->td_md.md_spinlock_count--;
  447         if (td->td_md.md_spinlock_count == 0)
  448                 intr_restore(daif);
  449 }
  450 
  451 #ifndef _SYS_SYSPROTO_H_
  452 struct sigreturn_args {
  453         ucontext_t *sigcntxp;
  454 };
  455 #endif
  456 
  457 int
  458 sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
  459 {
  460         ucontext_t uc;
  461         uint32_t spsr;
  462 
  463         if (uap == NULL)
  464                 return (EFAULT);
  465         if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
  466                 return (EFAULT);
  467 
  468         spsr = uc.uc_mcontext.mc_gpregs.gp_spsr;
  469         if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
  470             (spsr & (PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
  471                 return (EINVAL); 
  472 
  473         set_mcontext(td, &uc.uc_mcontext);
  474         set_fpcontext(td, &uc.uc_mcontext);
  475 
  476         /* Restore signal mask. */
  477         kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
  478 
  479         return (EJUSTRETURN);
  480 }
  481 
  482 /*
  483  * Construct a PCB from a trapframe. This is called from kdb_trap() where
  484  * we want to start a backtrace from the function that caused us to enter
  485  * the debugger. We have the context in the trapframe, but base the trace
  486  * on the PCB. The PCB doesn't have to be perfect, as long as it contains
  487  * enough for a backtrace.
  488  */
  489 void
  490 makectx(struct trapframe *tf, struct pcb *pcb)
  491 {
  492         int i;
  493 
  494         for (i = 0; i < PCB_LR; i++)
  495                 pcb->pcb_x[i] = tf->tf_x[i];
  496 
  497         pcb->pcb_x[PCB_LR] = tf->tf_lr;
  498         pcb->pcb_pc = tf->tf_elr;
  499         pcb->pcb_sp = tf->tf_sp;
  500 }
  501 
  502 void
  503 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
  504 {
  505         struct thread *td;
  506         struct proc *p;
  507         struct trapframe *tf;
  508         struct sigframe *fp, frame;
  509         struct sigacts *psp;
  510         struct sysentvec *sysent;
  511         int code, onstack, sig;
  512 
  513         td = curthread;
  514         p = td->td_proc;
  515         PROC_LOCK_ASSERT(p, MA_OWNED);
  516 
  517         sig = ksi->ksi_signo;
  518         code = ksi->ksi_code;
  519         psp = p->p_sigacts;
  520         mtx_assert(&psp->ps_mtx, MA_OWNED);
  521 
  522         tf = td->td_frame;
  523         onstack = sigonstack(tf->tf_sp);
  524 
  525         CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
  526             catcher, sig);
  527 
  528         /* Allocate and validate space for the signal handler context. */
  529         if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
  530             SIGISMEMBER(psp->ps_sigonstack, sig)) {
  531                 fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
  532                     td->td_sigstk.ss_size);
  533 #if defined(COMPAT_43)
  534                 td->td_sigstk.ss_flags |= SS_ONSTACK;
  535 #endif
  536         } else {
  537                 fp = (struct sigframe *)td->td_frame->tf_sp;
  538         }
  539 
  540         /* Make room, keeping the stack aligned */
  541         fp--;
  542         fp = (struct sigframe *)STACKALIGN(fp);
  543 
  544         /* Fill in the frame to copy out */
  545         get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
  546         get_fpcontext(td, &frame.sf_uc.uc_mcontext);
  547         frame.sf_si = ksi->ksi_info;
  548         frame.sf_uc.uc_sigmask = *mask;
  549         frame.sf_uc.uc_stack = td->td_sigstk;
  550         frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
  551             ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
  552         mtx_unlock(&psp->ps_mtx);
  553         PROC_UNLOCK(td->td_proc);
  554 
  555         /* Copy the sigframe out to the user's stack. */
  556         if (copyout(&frame, fp, sizeof(*fp)) != 0) {
  557                 /* Process has trashed its stack. Kill it. */
  558                 CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
  559                 PROC_LOCK(p);
  560                 sigexit(td, SIGILL);
  561         }
  562 
  563         tf->tf_x[0] = sig;
  564         tf->tf_x[1] = (register_t)&fp->sf_si;
  565         tf->tf_x[2] = (register_t)&fp->sf_uc;
  566 
  567         tf->tf_elr = (register_t)catcher;
  568         tf->tf_sp = (register_t)fp;
  569         sysent = p->p_sysent;
  570         if (sysent->sv_sigcode_base != 0)
  571                 tf->tf_lr = (register_t)sysent->sv_sigcode_base;
  572         else
  573                 tf->tf_lr = (register_t)(sysent->sv_psstrings -
  574                     *(sysent->sv_szsigcode));
  575 
  576         CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
  577             tf->tf_sp);
  578 
  579         PROC_LOCK(p);
  580         mtx_lock(&psp->ps_mtx);
  581 }
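      /*
       * Illustrative summary (editorial, not part of the original source):
       * when the thread returns to user mode after sendsig(), the trapframe
       * programmed above starts the handler with
       *
       *	x0  = sig              signal number
       *	x1  = &fp->sf_si       siginfo copied out in the sigframe
       *	x2  = &fp->sf_uc       ucontext copied out in the sigframe
       *	sp  = fp               the aligned sigframe itself
       *	elr = catcher          the handler entry point
       *	lr  = signal trampoline, which ends up in sys_sigreturn()
       *
       * The trampoline address comes from sv_sigcode_base when the sysentvec
       * provides one, otherwise from the sigcode placed below sv_psstrings.
       */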
  582 
  583 static void
  584 init_proc0(vm_offset_t kstack)
  585 {
  586         struct pcpu *pcpup = &__pcpu[0];
  587 
  588         proc_linkup0(&proc0, &thread0);
  589         thread0.td_kstack = kstack;
  590         thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1;
  591         thread0.td_pcb->pcb_fpflags = 0;
  592         thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
  593         thread0.td_frame = &proc0_tf;
  594         pcpup->pc_curpcb = thread0.td_pcb;
  595 }
  596 
  597 typedef struct {
  598         uint32_t type;
  599         uint64_t phys_start;
  600         uint64_t virt_start;
  601         uint64_t num_pages;
  602         uint64_t attr;
  603 } EFI_MEMORY_DESCRIPTOR;
  604 
  605 static int
  606 add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
  607     u_int *physmap_idxp)
  608 {
  609         u_int i, insert_idx, _physmap_idx;
  610 
  611         _physmap_idx = *physmap_idxp;
  612 
  613         if (length == 0)
  614                 return (1);
  615 
  616         /*
  617          * Find insertion point while checking for overlap.  Start off by
  618          * assuming the new entry will be added to the end.
  619          */
  620         insert_idx = _physmap_idx;
  621         for (i = 0; i <= _physmap_idx; i += 2) {
  622                 if (base < physmap[i + 1]) {
  623                         if (base + length <= physmap[i]) {
  624                                 insert_idx = i;
  625                                 break;
  626                         }
  627                         if (boothowto & RB_VERBOSE)
  628                                 printf(
  629                     "Overlapping memory regions, ignoring second region\n");
  630                         return (1);
  631                 }
  632         }
  633 
  634         /* See if we can prepend to the next entry. */
  635         if (insert_idx <= _physmap_idx &&
  636             base + length == physmap[insert_idx]) {
  637                 physmap[insert_idx] = base;
  638                 return (1);
  639         }
  640 
  641         /* See if we can append to the previous entry. */
  642         if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
  643                 physmap[insert_idx - 1] += length;
  644                 return (1);
  645         }
  646 
  647         _physmap_idx += 2;
  648         *physmap_idxp = _physmap_idx;
  649         if (_physmap_idx == PHYSMAP_SIZE) {
  650                 printf(
  651                 "Too many segments in the physical address map, giving up\n");
  652                 return (0);
  653         }
  654 
  655         /*
  656          * Move the last 'N' entries down to make room for the new
  657          * entry if needed.
  658          */
  659         for (i = _physmap_idx; i > insert_idx; i -= 2) {
  660                 physmap[i] = physmap[i - 2];
  661                 physmap[i + 1] = physmap[i - 1];
  662         }
  663 
  664         /* Insert the new entry. */
  665         physmap[insert_idx] = base;
  666         physmap[insert_idx + 1] = base + length;
  667         return (1);
  668 }
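      /*
       * Illustrative example (editorial, not part of the original source):
       * physmap[] holds [start, end) pairs in ascending order, two slots per
       * region.  Adding 0x80000000-0x80100000 and then an adjacent region
       * starting at 0x80100000 takes the "append to the previous entry" path
       * above, so the map stays at a single pair:
       *
       *	physmap[0] = 0x80000000;
       *	physmap[1] = <end of the second region>;
       *	physmap_idx = 2;
       *
       * Overlapping regions are reported (with RB_VERBOSE) and ignored, and
       * 0 is returned only once all PHYSMAP_SIZE slots are in use.
       */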
  669 
  670 #ifdef FDT
  671 static void
  672 add_fdt_mem_regions(struct mem_region *mr, int mrcnt, vm_paddr_t *physmap,
  673     u_int *physmap_idxp)
  674 {
  675 
  676         for (int i = 0; i < mrcnt; i++) {
  677                 if (!add_physmap_entry(mr[i].mr_start, mr[i].mr_size, physmap,
  678                     physmap_idxp))
  679                         break;
  680         }
  681 }
  682 #endif
  683 
  684 #define efi_next_descriptor(ptr, size) \
  685         ((struct efi_md *)(((uint8_t *) ptr) + size))
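      /*
       * Editorial note (not part of the original source): the descriptor
       * stride comes from the firmware-reported descriptor_size rather than
       * sizeof(struct efi_md), because UEFI may hand back descriptors that
       * are larger than the structure this kernel was built against.  With a
       * reported size of, say, 48 bytes the second descriptor starts at
       * (uint8_t *)map + 48 even if sizeof(struct efi_md) is smaller.
       */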
  686 
  687 static void
  688 add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
  689     u_int *physmap_idxp)
  690 {
  691         struct efi_md *map, *p;
  692         const char *type;
  693         size_t efisz;
  694         int ndesc, i;
  695 
  696         static const char *types[] = {
  697                 "Reserved",
  698                 "LoaderCode",
  699                 "LoaderData",
  700                 "BootServicesCode",
  701                 "BootServicesData",
  702                 "RuntimeServicesCode",
  703                 "RuntimeServicesData",
  704                 "ConventionalMemory",
  705                 "UnusableMemory",
  706                 "ACPIReclaimMemory",
  707                 "ACPIMemoryNVS",
  708                 "MemoryMappedIO",
  709                 "MemoryMappedIOPortSpace",
  710                 "PalCode"
  711         };
  712 
  713         /*
  714          * Memory map data provided by UEFI via the GetMemoryMap
  715          * Boot Services API.
  716          */
  717         efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
  718         map = (struct efi_md *)((uint8_t *)efihdr + efisz); 
  719 
  720         if (efihdr->descriptor_size == 0)
  721                 return;
  722         ndesc = efihdr->memory_size / efihdr->descriptor_size;
  723 
  724         if (boothowto & RB_VERBOSE)
  725                 printf("%23s %12s %12s %8s %4s\n",
  726                     "Type", "Physical", "Virtual", "#Pages", "Attr");
  727 
  728         for (i = 0, p = map; i < ndesc; i++,
  729             p = efi_next_descriptor(p, efihdr->descriptor_size)) {
  730                 if (boothowto & RB_VERBOSE) {
  731                         if (p->md_type <= EFI_MD_TYPE_PALCODE)
  732                                 type = types[p->md_type];
  733                         else
  734                                 type = "<INVALID>";
  735                         printf("%23s %012lx %12p %08lx ", type, p->md_phys,
  736                             p->md_virt, p->md_pages);
  737                         if (p->md_attr & EFI_MD_ATTR_UC)
  738                                 printf("UC ");
  739                         if (p->md_attr & EFI_MD_ATTR_WC)
  740                                 printf("WC ");
  741                         if (p->md_attr & EFI_MD_ATTR_WT)
  742                                 printf("WT ");
  743                         if (p->md_attr & EFI_MD_ATTR_WB)
  744                                 printf("WB ");
  745                         if (p->md_attr & EFI_MD_ATTR_UCE)
  746                                 printf("UCE ");
  747                         if (p->md_attr & EFI_MD_ATTR_WP)
  748                                 printf("WP ");
  749                         if (p->md_attr & EFI_MD_ATTR_RP)
  750                                 printf("RP ");
  751                         if (p->md_attr & EFI_MD_ATTR_XP)
  752                                 printf("XP ");
  753                         if (p->md_attr & EFI_MD_ATTR_RT)
  754                                 printf("RUNTIME");
  755                         printf("\n");
  756                 }
  757 
  758                 switch (p->md_type) {
  759                 case EFI_MD_TYPE_CODE:
  760                 case EFI_MD_TYPE_DATA:
  761                 case EFI_MD_TYPE_BS_CODE:
  762                 case EFI_MD_TYPE_BS_DATA:
  763                 case EFI_MD_TYPE_FREE:
  764                         /*
  765                          * We're allowed to use any entry with these types.
  766                          */
  767                         break;
  768                 default:
  769                         continue;
  770                 }
  771 
  772                 if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
  773                     physmap, physmap_idxp))
  774                         break;
  775         }
  776 }
  777 
  778 #ifdef FDT
  779 static void
  780 try_load_dtb(caddr_t kmdp)
  781 {
  782         vm_offset_t dtbp;
  783 
  784         dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
  785         if (dtbp == (vm_offset_t)NULL) {
  786                 printf("ERROR loading DTB\n");
  787                 return;
  788         }
  789 
  790         if (OF_install(OFW_FDT, 0) == FALSE)
  791                 panic("Cannot install FDT");
  792 
  793         if (OF_init((void *)dtbp) != 0)
  794                 panic("OF_init failed with the found device tree");
  795 }
  796 #endif
  797 
  798 static void
  799 cache_setup(void)
  800 {
  801         int dcache_line_shift, icache_line_shift, dczva_line_shift;
  802         uint32_t ctr_el0;
  803         uint32_t dczid_el0;
  804 
  805         ctr_el0 = READ_SPECIALREG(ctr_el0);
  806 
  807         /* Read the log2 words in each D cache line */
  808         dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
  809         /* Get the D cache line size */
  810         dcache_line_size = sizeof(int) << dcache_line_shift;
  811 
  812         /* And the same for the I cache */
  813         icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
  814         icache_line_size = sizeof(int) << icache_line_shift;
  815 
  816         idcache_line_size = MIN(dcache_line_size, icache_line_size);
  817 
  818         dczid_el0 = READ_SPECIALREG(dczid_el0);
  819 
  820         /* Check whether the dc zva instruction is permitted */
  821         if (dczid_el0 & DCZID_DZP)
  822                 dczva_line_size = 0;
  823         else {
  824                 /* Same calculation as for the cache line sizes above */
  825                 dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
  826                 dczva_line_size = sizeof(int) << dczva_line_shift;
  827 
  828                 /* Change pagezero function */
  829                 pagezero = pagezero_cache;
  830         }
  831 }
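      /*
       * Worked example (editorial, not part of the original source): CTR_EL0
       * and DCZID_EL0 report sizes as log2 of a count of 4-byte words, which
       * is why the shifts above start from sizeof(int).  A DminLine field of
       * 4 therefore yields
       *
       *	dcache_line_size = sizeof(int) << 4 = 64 bytes
       *
       * and a DCZID_EL0 block-size field of 4 likewise selects 64-byte
       * "dc zva" blocks, switching pagezero to pagezero_cache().
       */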
  832 
  833 void
  834 initarm(struct arm64_bootparams *abp)
  835 {
  836         struct efi_map_header *efihdr;
  837         struct pcpu *pcpup;
  838 #ifdef FDT
  839         struct mem_region mem_regions[FDT_MEM_REGIONS];
  840         int mem_regions_sz;
  841 #endif
  842         vm_offset_t lastaddr;
  843         caddr_t kmdp;
  844         vm_paddr_t mem_len;
  845         int i;
  846 
  847         /* Set the module data location */
  848         preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);
  849 
  850         /* Find the kernel address */
  851         kmdp = preload_search_by_type("elf kernel");
  852         if (kmdp == NULL)
  853                 kmdp = preload_search_by_type("elf64 kernel");
  854 
  855         boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
  856         init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);
  857 
  858 #ifdef FDT
  859         try_load_dtb(kmdp);
  860 #endif
  861 
  862         /* Find the address to start allocating from */
  863         lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
  864 
  865         /* Load the physical memory ranges */
  866         physmap_idx = 0;
  867         efihdr = (struct efi_map_header *)preload_search_info(kmdp,
  868             MODINFO_METADATA | MODINFOMD_EFI_MAP);
  869         if (efihdr != NULL)
  870                 add_efi_map_entries(efihdr, physmap, &physmap_idx);
  871 #ifdef FDT
  872         else {
  873                 /* Grab physical memory regions information from device tree. */
  874                 if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
  875                     NULL) != 0)
  876                         panic("Cannot get physical memory regions");
  877                 add_fdt_mem_regions(mem_regions, mem_regions_sz, physmap,
  878                     &physmap_idx);
  879         }
  880 #endif
  881 
  882         /* Copy the memory map into dump_avail and total up memory */
  883         mem_len = 0;
  884         for (i = 0; i < physmap_idx; i += 2) {
  885                 dump_avail[i] = physmap[i];
  886                 dump_avail[i + 1] = physmap[i + 1];
  887                 mem_len += physmap[i + 1] - physmap[i];
  888         }
  889         dump_avail[i] = 0;
  890         dump_avail[i + 1] = 0;
  891 
  892         /* Set the pcpu data, this is needed by pmap_bootstrap */
  893         pcpup = &__pcpu[0];
  894         pcpu_init(pcpup, 0, sizeof(struct pcpu));
  895 
  896         /*
  897          * Set the pcpu pointer with a backup in tpidr_el1 to be
  898          * loaded when entering the kernel from userland.
  899          */
  900         __asm __volatile(
  901             "mov x18, %0 \n"
  902             "msr tpidr_el1, %0" :: "r"(pcpup));
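              /*
               * Editorial note (not part of the original source): x18 is
               * reserved as the per-CPU pointer register on FreeBSD/arm64
               * (the kernel is normally built with -ffixed-x18), so
               * PCPU_GET/PCPU_SET and curthread resolve through it; the
               * tpidr_el1 copy is what the exception entry path reloads into
               * x18 when coming in from EL0.
               */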
  903 
  904         PCPU_SET(curthread, &thread0);
  905 
  906         /* Do basic tuning, hz etc */
  907         init_param1();
  908 
  909         cache_setup();
  910 
  911         /* Bootstrap enough of pmap to enter the kernel proper */
  912         pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
  913             KERNBASE - abp->kern_delta, lastaddr - KERNBASE);
  914 
  915         devmap_bootstrap(0, NULL);
  916 
  917         cninit();
  918 
  919         init_proc0(abp->kern_stack);
  920         msgbufinit(msgbufp, msgbufsize);
  921         mutex_init();
  922         init_param2(physmem);
  923 
  924         dbg_monitor_init();
  925         kdb_init();
  926 
  927         early_boot = 0;
  928 }
  929 
  930 uint32_t (*arm_cpu_fill_vdso_timehands)(struct vdso_timehands *,
  931     struct timecounter *);
  932 
  933 uint32_t
  934 cpu_fill_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
  935 {
  936 
  937         return (arm_cpu_fill_vdso_timehands != NULL ?
  938             arm_cpu_fill_vdso_timehands(vdso_th, tc) : 0);
  939 }
  940 
  941 #ifdef DDB
  942 #include <ddb/ddb.h>
  943 
  944 DB_SHOW_COMMAND(specialregs, db_show_spregs)
  945 {
  946 #define PRINT_REG(reg)  \
  947     db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))
  948 
  949         PRINT_REG(actlr_el1);
  950         PRINT_REG(afsr0_el1);
  951         PRINT_REG(afsr1_el1);
  952         PRINT_REG(aidr_el1);
  953         PRINT_REG(amair_el1);
  954         PRINT_REG(ccsidr_el1);
  955         PRINT_REG(clidr_el1);
  956         PRINT_REG(contextidr_el1);
  957         PRINT_REG(cpacr_el1);
  958         PRINT_REG(csselr_el1);
  959         PRINT_REG(ctr_el0);
  960         PRINT_REG(currentel);
  961         PRINT_REG(daif);
  962         PRINT_REG(dczid_el0);
  963         PRINT_REG(elr_el1);
  964         PRINT_REG(esr_el1);
  965         PRINT_REG(far_el1);
  966 #if 0
  967         /* ARM64TODO: Enable VFP before reading floating-point registers */
  968         PRINT_REG(fpcr);
  969         PRINT_REG(fpsr);
  970 #endif
  971         PRINT_REG(id_aa64afr0_el1);
  972         PRINT_REG(id_aa64afr1_el1);
  973         PRINT_REG(id_aa64dfr0_el1);
  974         PRINT_REG(id_aa64dfr1_el1);
  975         PRINT_REG(id_aa64isar0_el1);
  976         PRINT_REG(id_aa64isar1_el1);
  977         PRINT_REG(id_aa64pfr0_el1);
  978         PRINT_REG(id_aa64pfr1_el1);
  979         PRINT_REG(id_afr0_el1);
  980         PRINT_REG(id_dfr0_el1);
  981         PRINT_REG(id_isar0_el1);
  982         PRINT_REG(id_isar1_el1);
  983         PRINT_REG(id_isar2_el1);
  984         PRINT_REG(id_isar3_el1);
  985         PRINT_REG(id_isar4_el1);
  986         PRINT_REG(id_isar5_el1);
  987         PRINT_REG(id_mmfr0_el1);
  988         PRINT_REG(id_mmfr1_el1);
  989         PRINT_REG(id_mmfr2_el1);
  990         PRINT_REG(id_mmfr3_el1);
  991 #if 0
  992         /* Missing from llvm */
  993         PRINT_REG(id_mmfr4_el1);
  994 #endif
  995         PRINT_REG(id_pfr0_el1);
  996         PRINT_REG(id_pfr1_el1);
  997         PRINT_REG(isr_el1);
  998         PRINT_REG(mair_el1);
  999         PRINT_REG(midr_el1);
 1000         PRINT_REG(mpidr_el1);
 1001         PRINT_REG(mvfr0_el1);
 1002         PRINT_REG(mvfr1_el1);
 1003         PRINT_REG(mvfr2_el1);
 1004         PRINT_REG(revidr_el1);
 1005         PRINT_REG(sctlr_el1);
 1006         PRINT_REG(sp_el0);
 1007         PRINT_REG(spsel);
 1008         PRINT_REG(spsr_el1);
 1009         PRINT_REG(tcr_el1);
 1010         PRINT_REG(tpidr_el0);
 1011         PRINT_REG(tpidr_el1);
 1012         PRINT_REG(tpidrro_el0);
 1013         PRINT_REG(ttbr0_el1);
 1014         PRINT_REG(ttbr1_el1);
 1015         PRINT_REG(vbar_el1);
 1016 #undef PRINT_REG
 1017 }
 1018 
 1019 DB_SHOW_COMMAND(vtop, db_show_vtop)
 1020 {
 1021         uint64_t phys;
 1022 
 1023         if (have_addr) {
 1024                 phys = arm64_address_translate_s1e1r(addr);
 1025                 db_printf("Physical address reg: 0x%016lx\n", phys);
 1026         } else
 1027                 db_printf("show vtop <virt_addr>\n");
 1028 }
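      /*
       * Usage from the ddb prompt (illustrative; the address is an example):
       *
       *	db> show specialregs
       *	db> show vtop 0xffff000000000000
       *
       * "show vtop" runs an AT S1E1R stage-1 translation and prints the raw
       * PAR_EL1 value, so on a translation fault the fault status appears in
       * the low bits instead of a physical address.
       */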
 1029 #endif

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.