FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/machdep.c


    1 /*      $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $        */
    2 
    3 /*-
    4  * Copyright (c) 2004 Olivier Houchard
    5  * Copyright (c) 1994-1998 Mark Brinicombe.
    6  * Copyright (c) 1994 Brini.
    7  * All rights reserved.
    8  *
    9  * This code is derived from software written for Brini by Mark Brinicombe
   10  *
   11  * Redistribution and use in source and binary forms, with or without
   12  * modification, are permitted provided that the following conditions
   13  * are met:
   14  * 1. Redistributions of source code must retain the above copyright
   15  *    notice, this list of conditions and the following disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  * 3. All advertising materials mentioning features or use of this software
   20  *    must display the following acknowledgement:
   21  *      This product includes software developed by Mark Brinicombe
   22  *      for the NetBSD Project.
   23  * 4. The name of the company nor the name of the author may be used to
   24  *    endorse or promote products derived from this software without specific
   25  *    prior written permission.
   26  *
   27  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
   28  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
   29  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   30  * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
   31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   37  * SUCH DAMAGE.
   38  *
   39  * Machine dependent functions for kernel setup
   40  *
   41  * Created      : 17/09/94
   42  * Updated      : 18/04/01 updated for new wscons
   43  */
   44 
   45 #include "opt_compat.h"
   46 #include "opt_ddb.h"
   47 #include "opt_platform.h"
   48 #include "opt_sched.h"
   49 #include "opt_timer.h"
   50 
   51 #include <sys/cdefs.h>
   52 __FBSDID("$FreeBSD: releng/10.0/sys/arm/arm/machdep.c 255091 2013-08-31 07:08:21Z rpaulo $");
   53 
   54 #include <sys/param.h>
   55 #include <sys/proc.h>
   56 #include <sys/systm.h>
   57 #include <sys/bio.h>
   58 #include <sys/buf.h>
   59 #include <sys/bus.h>
   60 #include <sys/cons.h>
   61 #include <sys/cpu.h>
   62 #include <sys/exec.h>
   63 #include <sys/imgact.h>
   64 #include <sys/kdb.h>
   65 #include <sys/kernel.h>
   66 #include <sys/ktr.h>
   67 #include <sys/linker.h>
   68 #include <sys/lock.h>
   69 #include <sys/malloc.h>
   70 #include <sys/msgbuf.h>
   71 #include <sys/mutex.h>
   72 #include <sys/pcpu.h>
   73 #include <sys/ptrace.h>
   74 #include <sys/rwlock.h>
   75 #include <sys/sched.h>
   76 #include <sys/signalvar.h>
   77 #include <sys/syscallsubr.h>
   78 #include <sys/sysctl.h>
   79 #include <sys/sysent.h>
   80 #include <sys/sysproto.h>
   81 #include <sys/uio.h>
   82 
   83 #include <vm/vm.h>
   84 #include <vm/pmap.h>
   85 #include <vm/vm_map.h>
   86 #include <vm/vm_object.h>
   87 #include <vm/vm_page.h>
   88 #include <vm/vm_pager.h>
   89 
   90 #include <machine/armreg.h>
   91 #include <machine/atags.h>
   92 #include <machine/cpu.h>
   93 #include <machine/machdep.h>
   94 #include <machine/md_var.h>
   95 #include <machine/metadata.h>
   96 #include <machine/pcb.h>
   97 #include <machine/pmap.h>
   98 #include <machine/reg.h>
   99 #include <machine/trap.h>
  100 #include <machine/undefined.h>
  101 #include <machine/vmparam.h>
  102 #include <machine/sysarch.h>
  103 
  104 #ifdef FDT
  105 #include <dev/fdt/fdt_common.h>
  106 #include <dev/ofw/openfirm.h>
  107 #endif
  108 
  109 #ifdef DEBUG
  110 #define debugf(fmt, args...) printf(fmt, ##args)
  111 #else
  112 #define debugf(fmt, args...)
  113 #endif
  114 
  115 struct pcpu __pcpu[MAXCPU];
  116 struct pcpu *pcpup = &__pcpu[0];
  117 
  118 static struct trapframe proc0_tf;
  119 uint32_t cpu_reset_address = 0;
  120 int cold = 1;
  121 vm_offset_t vector_page;
  122 
  123 long realmem = 0;
  124 
  125 int (*_arm_memcpy)(void *, void *, int, int) = NULL;
  126 int (*_arm_bzero)(void *, int, int) = NULL;
  127 int _min_memcpy_size = 0;
  128 int _min_bzero_size = 0;
  129 
  130 extern int *end;
  131 #ifdef DDB
  132 extern vm_offset_t ksym_start, ksym_end;
  133 #endif
  134 
  135 #ifdef FDT
  136 /*
  137  * This is the number of L2 page tables required to cover a maximal
  138  * (hypothetical) memsize of 4GB plus all kernel mappings (vectors,
  139  * msgbuf, stacks, etc.), rounded up to be divisible by 4.
  140  */
  141 #define KERNEL_PT_MAX   78
  142 
  143 static struct pv_addr kernel_pt_table[KERNEL_PT_MAX];
  144 
  145 vm_paddr_t phys_avail[10];
  146 vm_paddr_t dump_avail[4];
  147 
  148 extern u_int data_abort_handler_address;
  149 extern u_int prefetch_abort_handler_address;
  150 extern u_int undefined_handler_address;
  151 
  152 vm_paddr_t pmap_pa;
  153 
  154 struct pv_addr systempage;
  155 static struct pv_addr msgbufpv;
  156 struct pv_addr irqstack;
  157 struct pv_addr undstack;
  158 struct pv_addr abtstack;
  159 static struct pv_addr kernelstack;
  160 
  161 const struct pmap_devmap *pmap_devmap_bootstrap_table;
  162 #endif
  163 
  164 #if defined(LINUX_BOOT_ABI)
  165 #define LBABI_MAX_BANKS 10
  166 
  167 uint32_t board_id;
  168 struct arm_lbabi_tag *atag_list;
  169 char linux_command_line[LBABI_MAX_COMMAND_LINE + 1];
  170 char atags[LBABI_MAX_COMMAND_LINE * 2];
  171 uint32_t memstart[LBABI_MAX_BANKS];
  172 uint32_t memsize[LBABI_MAX_BANKS];
  173 uint32_t membanks;
  174 #endif
  175 
  176 static uint32_t board_revision;
  177 /* hex representation of uint64_t */
  178 static char board_serial[32];
  179 
  180 SYSCTL_NODE(_hw, OID_AUTO, board, CTLFLAG_RD, 0, "Board attributes");
  181 SYSCTL_UINT(_hw_board, OID_AUTO, revision, CTLFLAG_RD,
  182     &board_revision, 0, "Board revision");
  183 SYSCTL_STRING(_hw_board, OID_AUTO, serial, CTLFLAG_RD,
  184     board_serial, 0, "Board serial");
  185 
  186 int vfp_exists;
  187 SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
  188     &vfp_exists, 0, "Floating point support enabled");
  189 
  190 void
  191 board_set_serial(uint64_t serial)
  192 {
  193 
  194         snprintf(board_serial, sizeof(board_serial),
  195             "%016jx", serial);
  196 }
  197 
  198 void
  199 board_set_revision(uint32_t revision)
  200 {
  201 
  202         board_revision = revision;
  203 }
  204 
  205 void
  206 sendsig(catcher, ksi, mask)
  207         sig_t catcher;
  208         ksiginfo_t *ksi;
  209         sigset_t *mask;
  210 {
  211         struct thread *td;
  212         struct proc *p;
  213         struct trapframe *tf;
  214         struct sigframe *fp, frame;
  215         struct sigacts *psp;
  216         int onstack;
  217         int sig;
  218         int code;
  219 
  220         td = curthread;
  221         p = td->td_proc;
  222         PROC_LOCK_ASSERT(p, MA_OWNED);
  223         sig = ksi->ksi_signo;
  224         code = ksi->ksi_code;
  225         psp = p->p_sigacts;
  226         mtx_assert(&psp->ps_mtx, MA_OWNED);
  227         tf = td->td_frame;
  228         onstack = sigonstack(tf->tf_usr_sp);
  229 
  230         CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
  231             catcher, sig);
  232 
  233         /* Allocate and validate space for the signal handler context. */
  234         if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) &&
  235             SIGISMEMBER(psp->ps_sigonstack, sig)) {
  236                 fp = (struct sigframe *)(td->td_sigstk.ss_sp +
  237                     td->td_sigstk.ss_size);
  238 #if defined(COMPAT_43)
  239                 td->td_sigstk.ss_flags |= SS_ONSTACK;
  240 #endif
  241         } else
  242                 fp = (struct sigframe *)td->td_frame->tf_usr_sp;
  243 
  244         /* make room on the stack */
  245         fp--;
  246         
  247         /* make the stack aligned */
  248         fp = (struct sigframe *)STACKALIGN(fp);
  249         /* Populate the siginfo frame. */
  250         get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
  251         frame.sf_si = ksi->ksi_info;
  252         frame.sf_uc.uc_sigmask = *mask;
  253         frame.sf_uc.uc_stack = td->td_sigstk;
  254         frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
  255             ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
  256         mtx_unlock(&psp->ps_mtx);
  257         PROC_UNLOCK(td->td_proc);
  258 
  259         /* Copy the sigframe out to the user's stack. */
  260         if (copyout(&frame, fp, sizeof(*fp)) != 0) {
  261                 /* Process has trashed its stack. Kill it. */
  262                 CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
  263                 PROC_LOCK(p);
  264                 sigexit(td, SIGILL);
  265         }
  266 
  267         /* Translate the signal if appropriate. */
  268         if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
  269                 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
  270 
  271         /*
  272          * Build context to run handler in.  We invoke the handler
  273          * directly, only returning via the trampoline.  Note the
  274          * trampoline version numbers are coordinated with machine-
  275          * dependent code in libc.
  276          */
  277         
  278         tf->tf_r0 = sig;
  279         tf->tf_r1 = (register_t)&fp->sf_si;
  280         tf->tf_r2 = (register_t)&fp->sf_uc;
  281 
  282         /* the trampoline uses r5 as the uc address */
  283         tf->tf_r5 = (register_t)&fp->sf_uc;
  284         tf->tf_pc = (register_t)catcher;
  285         tf->tf_usr_sp = (register_t)fp;
  286         tf->tf_usr_lr = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));
  287 
  288         CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr,
  289             tf->tf_usr_sp);
  290 
  291         PROC_LOCK(p);
  292         mtx_lock(&psp->ps_mtx);
  293 }
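/*
 * For illustration (a sketch, not part of the original file): the
 * userland side of the hand-off above.  sendsig() enters the handler
 * directly with r0 = sig, r1 = &fp->sf_si and r2 = &fp->sf_uc, and
 * points lr at the libc signal trampoline with r5 = &fp->sf_uc, so the
 * trampoline can locate the saved context when the handler returns:
 *
 *	handler(sig, &fp->sf_si, &fp->sf_uc);	// entered via tf_pc
 *	sigreturn(&fp->sf_uc);			// trampoline, via r5
 *
 * The real trampoline is machine-dependent assembler in libc; the two
 * calls above only sketch its effect.
 */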
  294 
  295 struct kva_md_info kmi;
  296 
  297 /*
  298  * arm32_vector_init:
  299  *
  300  *      Initialize the vector page, and select whether or not to
  301  *      relocate the vectors.
  302  *
  303  *      NOTE: We expect the vector page to already be mapped at its
  304  *      final destination.
  305  */
  306 
  307 extern unsigned int page0[], page0_data[];
  308 void
  309 arm_vector_init(vm_offset_t va, int which)
  310 {
  311         unsigned int *vectors = (unsigned int *)va;
  312         unsigned int *vectors_data = vectors + (page0_data - page0);
  313         int vec;
  314 
  315         /*
  316          * Loop through the vectors we're taking over, and copy the
  317          * vector's insn and data word.
  318          */
  319         for (vec = 0; vec < ARM_NVEC; vec++) {
  320                 if ((which & (1 << vec)) == 0) {
  321                         /* Don't want to take over this vector. */
  322                         continue;
  323                 }
  324                 vectors[vec] = page0[vec];
  325                 vectors_data[vec] = page0_data[vec];
  326         }
  327 
  328         /* Now sync the vectors. */
  329         cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));
  330 
  331         vector_page = va;
  332 
  333         if (va == ARM_VECTORS_HIGH) {
  334                 /*
  335                  * Assume the MD caller knows what it's doing here, and
  336                  * really does want the vector page relocated.
  337                  *
  338                  * Note: This has to be done here (and not just in
  339                  * cpu_setup()) because the vector page needs to be
  340                  * accessible *before* cpu_startup() is called.
  341                  * Think ddb(9) ...
  342                  *
  343                  * NOTE: If the CPU control register is not readable,
  344                  * this will totally fail!  We'll just assume that
  345                  * any system that has high vector support has a
  346                  * readable CPU control register, for now.  If we
  347                  * ever encounter one that does not, we'll have to
  348                  * rethink this.
  349                  */
  350                 cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
  351         }
  352 }
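/*
 * Typical use, as initarm() does below: take over every vector and
 * install the page at the high vectors address, which also triggers the
 * CPU_CONTROL_VECRELOC path above.  ARM_VEC_ALL has one bit set per
 * vector, matching the per-bit test on `which' in the copy loop:
 *
 *	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
 */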
  353 
  354 static void
  355 cpu_startup(void *dummy)
  356 {
  357         struct pcb *pcb = thread0.td_pcb;
  358 #ifdef ARM_TP_ADDRESS
  359 #ifndef ARM_CACHE_LOCK_ENABLE
  360         vm_page_t m;
  361 #endif
  362 #endif
  363 
  364         cpu_setup("");
  365         identify_arm_cpu();
  366 
  367         printf("real memory  = %ju (%ju MB)\n", (uintmax_t)ptoa(physmem),
  368             (uintmax_t)ptoa(physmem) / 1048576);
  369         realmem = physmem;
  370 
  371         /*
  372          * Display the RAM layout.
  373          */
  374         if (bootverbose) {
  375                 int indx;
  376 
  377                 printf("Physical memory chunk(s):\n");
  378                 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
  379                         vm_paddr_t size;
  380 
  381                         size = phys_avail[indx + 1] - phys_avail[indx];
  382                         printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n",
  383                             (uintmax_t)phys_avail[indx],
  384                             (uintmax_t)phys_avail[indx + 1] - 1,
  385                             (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
  386                 }
  387         }
  388 
  389         vm_ksubmap_init(&kmi);
  390 
  391         printf("avail memory = %ju (%ju MB)\n",
  392             (uintmax_t)ptoa(cnt.v_free_count),
  393             (uintmax_t)ptoa(cnt.v_free_count) / 1048576);
  394 
  395         bufinit();
  396         vm_pager_bufferinit();
  397         pcb->un_32.pcb32_und_sp = (u_int)thread0.td_kstack +
  398             USPACE_UNDEF_STACK_TOP;
  399         pcb->un_32.pcb32_sp = (u_int)thread0.td_kstack +
  400             USPACE_SVC_STACK_TOP;
  401         vector_page_setprot(VM_PROT_READ);
  402         pmap_set_pcb_pagedir(pmap_kernel(), pcb);
  403         pmap_postinit();
  404 #ifdef ARM_TP_ADDRESS
  405 #ifdef ARM_CACHE_LOCK_ENABLE
  406         pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
  407         arm_lock_cache_line(ARM_TP_ADDRESS);
  408 #else
  409         m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
  410         pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
  411 #endif
  412         *(uint32_t *)ARM_RAS_START = 0;
  413         *(uint32_t *)ARM_RAS_END = 0xffffffff;
  414 #endif
  415 }
  416 
  417 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
  418 
  419 /*
  420  * Flush the D-cache for non-DMA I/O so that the I-cache can
  421  * be made coherent later.
  422  */
  423 void
  424 cpu_flush_dcache(void *ptr, size_t len)
  425 {
  426 
  427         cpu_dcache_wb_range((uintptr_t)ptr, len);
  428         cpu_l2cache_wb_range((uintptr_t)ptr, len);
  429 }
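/*
 * Example pairing (a sketch): code that stores instructions through the
 * D-cache first writes the range back with cpu_flush_dcache(), then
 * syncs the I-cache, as arm_vector_init() does above:
 *
 *	memcpy(dst, code, len);
 *	cpu_flush_dcache(dst, len);
 *	cpu_icache_sync_range((vm_offset_t)dst, len);
 */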
  430 
  431 /* Get current clock frequency for the given cpu id. */
  432 int
  433 cpu_est_clockrate(int cpu_id, uint64_t *rate)
  434 {
  435 
  436         return (ENXIO);
  437 }
  438 
  439 void
  440 cpu_idle(int busy)
  441 {
  442         
  443         CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
  444             busy, curcpu);
  445 #ifndef NO_EVENTTIMERS
  446         if (!busy) {
  447                 critical_enter();
  448                 cpu_idleclock();
  449         }
  450 #endif
  451         if (!sched_runnable())
  452                 cpu_sleep(0);
  453 #ifndef NO_EVENTTIMERS
  454         if (!busy) {
  455                 cpu_activeclock();
  456                 critical_exit();
  457         }
  458 #endif
  459         CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
  460             busy, curcpu);
  461 }
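/*
 * Sequence sketch for the !busy path above: within a critical section,
 * cpu_idleclock() switches the event timers to idle mode so periodic
 * ticks can be skipped, cpu_sleep(0) waits for the next interrupt, and
 * cpu_activeclock() resumes normal tick processing before the critical
 * section is left.
 */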
  462 
  463 int
  464 cpu_idle_wakeup(int cpu)
  465 {
  466 
  467         return (0);
  468 }
  469 
  470 int
  471 fill_regs(struct thread *td, struct reg *regs)
  472 {
  473         struct trapframe *tf = td->td_frame;
  474         bcopy(&tf->tf_r0, regs->r, sizeof(regs->r));
  475         regs->r_sp = tf->tf_usr_sp;
  476         regs->r_lr = tf->tf_usr_lr;
  477         regs->r_pc = tf->tf_pc;
  478         regs->r_cpsr = tf->tf_spsr;
  479         return (0);
  480 }
  481 int
  482 fill_fpregs(struct thread *td, struct fpreg *regs)
  483 {
  484         bzero(regs, sizeof(*regs));
  485         return (0);
  486 }
  487 
  488 int
  489 set_regs(struct thread *td, struct reg *regs)
  490 {
  491         struct trapframe *tf = td->td_frame;
  492         
  493         bcopy(regs->r, &tf->tf_r0, sizeof(regs->r));
  494         tf->tf_usr_sp = regs->r_sp;
  495         tf->tf_usr_lr = regs->r_lr;
  496         tf->tf_pc = regs->r_pc;
  497         tf->tf_spsr &=  ~PSR_FLAGS;
  498         tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS;
  499         return (0);                                                             
  500 }
  501 
  502 int
  503 set_fpregs(struct thread *td, struct fpreg *regs)
  504 {
  505         return (0);
  506 }
  507 
  508 int
  509 fill_dbregs(struct thread *td, struct dbreg *regs)
  510 {
  511         return (0);
  512 }
  513 int
  514 set_dbregs(struct thread *td, struct dbreg *regs)
  515 {
  516         return (0);
  517 }
  518 
  519 
  520 static int
  521 ptrace_read_int(struct thread *td, vm_offset_t addr, u_int32_t *v)
  522 {
  523         struct iovec iov;
  524         struct uio uio;
  525 
  526         PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
  527         iov.iov_base = (caddr_t) v;
  528         iov.iov_len = sizeof(u_int32_t);
  529         uio.uio_iov = &iov;
  530         uio.uio_iovcnt = 1;
  531         uio.uio_offset = (off_t)addr;
  532         uio.uio_resid = sizeof(u_int32_t);
  533         uio.uio_segflg = UIO_SYSSPACE;
  534         uio.uio_rw = UIO_READ;
  535         uio.uio_td = td;
  536         return proc_rwmem(td->td_proc, &uio);
  537 }
  538 
  539 static int
  540 ptrace_write_int(struct thread *td, vm_offset_t addr, u_int32_t v)
  541 {
  542         struct iovec iov;
  543         struct uio uio;
  544 
  545         PROC_LOCK_ASSERT(td->td_proc, MA_NOTOWNED);
  546         iov.iov_base = (caddr_t) &v;
  547         iov.iov_len = sizeof(u_int32_t);
  548         uio.uio_iov = &iov;
  549         uio.uio_iovcnt = 1;
  550         uio.uio_offset = (off_t)addr;
  551         uio.uio_resid = sizeof(u_int32_t);
  552         uio.uio_segflg = UIO_SYSSPACE;
  553         uio.uio_rw = UIO_WRITE;
  554         uio.uio_td = td;
  555         return proc_rwmem(td->td_proc, &uio);
  556 }
  557 
  558 int
  559 ptrace_single_step(struct thread *td)
  560 {
  561         struct proc *p;
  562         int error;
  563         
  564         KASSERT(td->td_md.md_ptrace_instr == 0,
  565          ("Didn't clear single step"));
  566         p = td->td_proc;
  567         PROC_UNLOCK(p);
  568         error = ptrace_read_int(td, td->td_frame->tf_pc + 4,
  569             &td->td_md.md_ptrace_instr);
  570         if (error)
  571                 goto out;
  572         error = ptrace_write_int(td, td->td_frame->tf_pc + 4,
  573             PTRACE_BREAKPOINT);
  574         if (error)
  575                 td->td_md.md_ptrace_instr = 0;
  576         td->td_md.md_ptrace_addr = td->td_frame->tf_pc + 4;
  577 out:
  578         PROC_LOCK(p);
  579         return (error);
  580 }
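/*
 * Debugger-side view (an illustrative userland sketch, not kernel
 * code): PT_STEP reaches ptrace_single_step(), which saves the
 * instruction at pc + 4 and plants PTRACE_BREAKPOINT there; the
 * resulting trap stops the child, after which the kernel restores the
 * saved instruction in ptrace_clear_single_step():
 *
 *	ptrace(PT_STEP, child, (caddr_t)1, 0);
 *	waitpid(child, &status, 0);	// stops on the planted breakpoint
 *
 * Note that only the next sequential slot is patched, so a taken branch
 * can escape the single step.
 */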
  581 
  582 int
  583 ptrace_clear_single_step(struct thread *td)
  584 {
  585         struct proc *p;
  586 
  587         if (td->td_md.md_ptrace_instr) {
  588                 p = td->td_proc;
  589                 PROC_UNLOCK(p);
  590                 ptrace_write_int(td, td->td_md.md_ptrace_addr,
  591                     td->td_md.md_ptrace_instr);
  592                 PROC_LOCK(p);
  593                 td->td_md.md_ptrace_instr = 0;
  594         }
  595         return (0);
  596 }
  597 
  598 int
  599 ptrace_set_pc(struct thread *td, unsigned long addr)
  600 {
  601         td->td_frame->tf_pc = addr;
  602         return (0);
  603 }
  604 
  605 void
  606 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
  607 {
  608 }
  609 
  610 void
  611 spinlock_enter(void)
  612 {
  613         struct thread *td;
  614         register_t cspr;
  615 
  616         td = curthread;
  617         if (td->td_md.md_spinlock_count == 0) {
  618                 cspr = disable_interrupts(I32_bit | F32_bit);
  619                 td->td_md.md_spinlock_count = 1;
  620                 td->td_md.md_saved_cspr = cspr;
  621         } else
  622                 td->td_md.md_spinlock_count++;
  623         critical_enter();
  624 }
  625 
  626 void
  627 spinlock_exit(void)
  628 {
  629         struct thread *td;
  630         register_t cspr;
  631 
  632         td = curthread;
  633         critical_exit();
  634         cspr = td->td_md.md_saved_cspr;
  635         td->td_md.md_spinlock_count--;
  636         if (td->td_md.md_spinlock_count == 0)
  637                 restore_interrupts(cspr);
  638 }
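/*
 * Illustrative nesting behavior: only the outermost spinlock_enter()
 * saves the CPSR and masks I32_bit/F32_bit, and only the matching
 * outermost spinlock_exit() restores it:
 *
 *	spinlock_enter();	// count 0 -> 1, CPSR saved, IRQ/FIQ off
 *	spinlock_enter();	// count 1 -> 2, no CPSR change
 *	spinlock_exit();	// count 2 -> 1, interrupts stay masked
 *	spinlock_exit();	// count 1 -> 0, saved CPSR restored
 */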
  639 
  640 /*
  641  * Clear registers on exec
  642  */
  643 void
  644 exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
  645 {
  646         struct trapframe *tf = td->td_frame;
  647 
  648         memset(tf, 0, sizeof(*tf));
  649         tf->tf_usr_sp = stack;
  650         tf->tf_usr_lr = imgp->entry_addr;
  651         tf->tf_svc_lr = 0x77777777;
  652         tf->tf_pc = imgp->entry_addr;
  653         tf->tf_spsr = PSR_USR32_MODE;
  654 }
  655 
  656 /*
  657  * Get machine context.
  658  */
  659 int
  660 get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
  661 {
  662         struct trapframe *tf = td->td_frame;
  663         __greg_t *gr = mcp->__gregs;
  664 
  665         if (clear_ret & GET_MC_CLEAR_RET)
  666                 gr[_REG_R0] = 0;
  667         else
  668                 gr[_REG_R0]   = tf->tf_r0;
  669         gr[_REG_R1]   = tf->tf_r1;
  670         gr[_REG_R2]   = tf->tf_r2;
  671         gr[_REG_R3]   = tf->tf_r3;
  672         gr[_REG_R4]   = tf->tf_r4;
  673         gr[_REG_R5]   = tf->tf_r5;
  674         gr[_REG_R6]   = tf->tf_r6;
  675         gr[_REG_R7]   = tf->tf_r7;
  676         gr[_REG_R8]   = tf->tf_r8;
  677         gr[_REG_R9]   = tf->tf_r9;
  678         gr[_REG_R10]  = tf->tf_r10;
  679         gr[_REG_R11]  = tf->tf_r11;
  680         gr[_REG_R12]  = tf->tf_r12;
  681         gr[_REG_SP]   = tf->tf_usr_sp;
  682         gr[_REG_LR]   = tf->tf_usr_lr;
  683         gr[_REG_PC]   = tf->tf_pc;
  684         gr[_REG_CPSR] = tf->tf_spsr;
  685 
  686         return (0);
  687 }
  688 
  689 /*
  690  * Set machine context.
  691  *
  692  * Note: ideally only the user-modifiable CPSR flags would be honored,
  693  * but the implementation below installs the supplied registers verbatim.
  694  */
  695 int
  696 set_mcontext(struct thread *td, const mcontext_t *mcp)
  697 {
  698         struct trapframe *tf = td->td_frame;
  699         const __greg_t *gr = mcp->__gregs;
  700 
  701         tf->tf_r0 = gr[_REG_R0];
  702         tf->tf_r1 = gr[_REG_R1];
  703         tf->tf_r2 = gr[_REG_R2];
  704         tf->tf_r3 = gr[_REG_R3];
  705         tf->tf_r4 = gr[_REG_R4];
  706         tf->tf_r5 = gr[_REG_R5];
  707         tf->tf_r6 = gr[_REG_R6];
  708         tf->tf_r7 = gr[_REG_R7];
  709         tf->tf_r8 = gr[_REG_R8];
  710         tf->tf_r9 = gr[_REG_R9];
  711         tf->tf_r10 = gr[_REG_R10];
  712         tf->tf_r11 = gr[_REG_R11];
  713         tf->tf_r12 = gr[_REG_R12];
  714         tf->tf_usr_sp = gr[_REG_SP];
  715         tf->tf_usr_lr = gr[_REG_LR];
  716         tf->tf_pc = gr[_REG_PC];
  717         tf->tf_spsr = gr[_REG_CPSR];
  718 
  719         return (0);
  720 }
  721 
  722 /*
  723  * MPSAFE
  724  */
  725 int
  726 sys_sigreturn(td, uap)
  727         struct thread *td;
  728         struct sigreturn_args /* {
  729                 const struct __ucontext *sigcntxp;
  730         } */ *uap;
  731 {
  732         struct sigframe sf;
  733         struct trapframe *tf;
  734         int spsr;
  735         
  736         if (uap == NULL)
  737                 return (EFAULT);
  738         if (copyin(uap->sigcntxp, &sf, sizeof(sf)))
  739                 return (EFAULT);
  740         /*
  741          * Make sure the processor mode has not been tampered with and
  742          * interrupts have not been disabled.
  743          */
  744         spsr = sf.sf_uc.uc_mcontext.__gregs[_REG_CPSR];
  745         if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
  746             (spsr & (I32_bit | F32_bit)) != 0)
  747                 return (EINVAL);
  748         /* Restore register context. */
  749         tf = td->td_frame;
  750         set_mcontext(td, &sf.sf_uc.uc_mcontext);
  751 
  752         /* Restore signal mask. */
  753         kern_sigprocmask(td, SIG_SETMASK, &sf.sf_uc.uc_sigmask, NULL, 0);
  754 
  755         return (EJUSTRETURN);
  756 }
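/*
 * Example of the validation above (illustrative values): a forged
 * context whose CPSR selects a privileged mode such as PSR_SVC32_MODE,
 * or which sets I32_bit/F32_bit to mask interrupts, is rejected with
 * EINVAL before any register state is installed.
 */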
  757 
  758 
  759 /*
  760  * Construct a PCB from a trapframe. This is called from kdb_trap() where
  761  * we want to start a backtrace from the function that caused us to enter
  762  * the debugger. We have the context in the trapframe, but base the trace
  763  * on the PCB. The PCB doesn't have to be perfect, as long as it contains
  764  * enough for a backtrace.
  765  */
  766 void
  767 makectx(struct trapframe *tf, struct pcb *pcb)
  768 {
  769         pcb->un_32.pcb32_r8 = tf->tf_r8;
  770         pcb->un_32.pcb32_r9 = tf->tf_r9;
  771         pcb->un_32.pcb32_r10 = tf->tf_r10;
  772         pcb->un_32.pcb32_r11 = tf->tf_r11;
  773         pcb->un_32.pcb32_r12 = tf->tf_r12;
  774         pcb->un_32.pcb32_pc = tf->tf_pc;
  775         pcb->un_32.pcb32_lr = tf->tf_usr_lr;
  776         pcb->un_32.pcb32_sp = tf->tf_usr_sp;
  777 }
  778 
  779 /*
  780  * Make a standard dump_avail array.  We can't build phys_avail here,
  781  * since that must wait until after pmap_bootstrap is called, but
  782  * dump_avail is needed before pmap_bootstrap.
  783  *
  784  * ARM_USE_SMALL_ALLOC uses dump_avail, so it must be filled before
  785  * calling pmap_bootstrap.
  786  */
  787 void
  788 arm_dump_avail_init(vm_offset_t ramsize, size_t max)
  789 {
  790 #ifdef LINUX_BOOT_ABI
  791         /*
  792          * Linux boot loader passes us the actual banks of memory, so use them
  793          * to construct the dump_avail array.
  794          */
  795         if (membanks > 0) 
  796         {
  797                 int i, j;
  798 
  799                 if (max < (membanks + 1) * 2)
  800                         panic("dump_avail[%d] too small for %d banks\n",
  801                             max, membanks);
  802                 for (j = 0, i = 0; i < membanks; i++) {
  803                         dump_avail[j++] = round_page(memstart[i]);
  804                         dump_avail[j++] = trunc_page(memstart[i] + memsize[i]);
  805                 }
  806                 dump_avail[j++] = 0;
  807                 dump_avail[j++] = 0;
  808                 return;
  809         }
  810 #endif
  811         if (max < 4)
  812                 panic("dump_avail too small\n");
  813 
  814         dump_avail[0] = round_page(PHYSADDR);
  815         dump_avail[1] = trunc_page(PHYSADDR + ramsize);
  816         dump_avail[2] = 0;
  817         dump_avail[3] = 0;
  818 }
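/*
 * Required ordering, as initarm() does below: dump_avail must be
 * populated before pmap_bootstrap() runs.
 *
 *	arm_dump_avail_init(memsize,
 *	    sizeof(dump_avail) / sizeof(dump_avail[0]));
 *	pmap_bootstrap(freemempos, &kernel_l1pt);
 */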
  819 
  820 /*
  821  * Fake up a boot descriptor table
  822  */
  823 vm_offset_t
  824 fake_preload_metadata(struct arm_boot_params *abp __unused)
  825 {
  826 #ifdef DDB
  827         vm_offset_t zstart = 0, zend = 0;
  828 #endif
  829         vm_offset_t lastaddr;
  830         int i = 0;
  831         static uint32_t fake_preload[35];
  832 
  833         fake_preload[i++] = MODINFO_NAME;
  834         fake_preload[i++] = strlen("kernel") + 1;
  835         strcpy((char*)&fake_preload[i++], "kernel");
  836         i += 1;
  837         fake_preload[i++] = MODINFO_TYPE;
  838         fake_preload[i++] = strlen("elf kernel") + 1;
  839         strcpy((char*)&fake_preload[i++], "elf kernel");
  840         i += 2;
  841         fake_preload[i++] = MODINFO_ADDR;
  842         fake_preload[i++] = sizeof(vm_offset_t);
  843         fake_preload[i++] = KERNVIRTADDR;
  844         fake_preload[i++] = MODINFO_SIZE;
  845         fake_preload[i++] = sizeof(uint32_t);
  846         fake_preload[i++] = (uint32_t)&end - KERNVIRTADDR;
  847 #ifdef DDB
  848         if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) {
  849                 fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM;
  850                 fake_preload[i++] = sizeof(vm_offset_t);
  851                 fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4);
  852                 fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM;
  853                 fake_preload[i++] = sizeof(vm_offset_t);
  854                 fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8);
  855                 lastaddr = *(uint32_t *)(KERNVIRTADDR + 8);
  856                 zend = lastaddr;
  857                 zstart = *(uint32_t *)(KERNVIRTADDR + 4);
  858                 ksym_start = zstart;
  859                 ksym_end = zend;
  860         } else
  861 #endif
  862                 lastaddr = (vm_offset_t)&end;
  863         fake_preload[i++] = 0;
  864         fake_preload[i] = 0;
  865         preload_metadata = (void *)fake_preload;
  866 
  867         return (lastaddr);
  868 }
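/*
 * The list built above is a flat array of (type, length, data...)
 * records terminated by a zero type and a zero length.  A minimal
 * walker for that layout might look like the sketch below; it mirrors
 * what the preload_* consumers do but is not their implementation,
 * and it assumes the word-aligned payloads used above.
 *
 *	static uint32_t *
 *	modinfo_next_record(uint32_t *p)
 *	{
 *		// skip type word, length word, then word-aligned payload
 *		return (p + 2 + howmany(p[1], sizeof(uint32_t)));
 *	}
 */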
  869 
  870 void
  871 pcpu0_init(void)
  872 {
  873 #if ARM_ARCH_6 || ARM_ARCH_7A || defined(CPU_MV_PJ4B)
  874         set_pcpu(pcpup);
  875 #endif
  876         pcpu_init(pcpup, 0, sizeof(struct pcpu));
  877         PCPU_SET(curthread, &thread0);
  878 #ifdef VFP
  879         PCPU_SET(cpu, 0);
  880 #endif
  881 }
  882 
  883 #if defined(LINUX_BOOT_ABI)
  884 vm_offset_t
  885 linux_parse_boot_param(struct arm_boot_params *abp)
  886 {
  887         struct arm_lbabi_tag *walker;
  888         uint32_t revision;
  889         uint64_t serial;
  890 
  891         /*
  892          * Linux boot ABI: r0 = 0, r1 is the board type (!= 0) and r2
  893          * is atags or dtb pointer.  If all of these aren't satisfied,
  894          * then punt.
  895          */
  896         if (!(abp->abp_r0 == 0 && abp->abp_r1 != 0 && abp->abp_r2 != 0))
  897                 return 0;
  898 
  899         board_id = abp->abp_r1;
  900         walker = (struct arm_lbabi_tag *)
  901             (abp->abp_r2 + KERNVIRTADDR - KERNPHYSADDR);
  902 
  903         /* xxx - Need to also look for binary device tree */
  904         if (ATAG_TAG(walker) != ATAG_CORE)
  905                 return 0;
  906 
  907         atag_list = walker;
  908         while (ATAG_TAG(walker) != ATAG_NONE) {
  909                 switch (ATAG_TAG(walker)) {
  910                 case ATAG_CORE:
  911                         break;
  912                 case ATAG_MEM:
  913                         if (membanks < LBABI_MAX_BANKS) {
  914                                 memstart[membanks] = walker->u.tag_mem.start;
  915                                 memsize[membanks] = walker->u.tag_mem.size;
  916                         }
  917                         membanks++;
  918                         break;
  919                 case ATAG_INITRD2:
  920                         break;
  921                 case ATAG_SERIAL:
  922                         serial = walker->u.tag_sn.low |
  923                             ((uint64_t)walker->u.tag_sn.high << 32);
  924                         board_set_serial(serial);
  925                         break;
  926                 case ATAG_REVISION:
  927                         revision = walker->u.tag_rev.rev;
  928                         board_set_revision(revision);
  929                         break;
  930                 case ATAG_CMDLINE:
  931                         /* XXX open question: Parse this for boothowto? */
  932                         bcopy(walker->u.tag_cmd.command, linux_command_line,
  933                               ATAG_SIZE(walker));
  934                         break;
  935                 default:
  936                         break;
  937                 }
  938                 walker = ATAG_NEXT(walker);
  939         }
  940 
  941         /* Save a copy for later */
  942         bcopy(atag_list, atags,
  943             (char *)walker - (char *)atag_list + ATAG_SIZE(walker));
  944 
  945         return fake_preload_metadata(abp);
  946 }
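/*
 * For reference, the register contract checked at the top of
 * linux_parse_boot_param() means a Linux-ABI boot loader enters the
 * kernel roughly as follows (illustrative pseudo-loader, not real
 * code):
 *
 *	r0 = 0;			// mandatory zero
 *	r1 = board_id;		// nonzero machine type
 *	r2 = atags_or_dtb;	// pointer to the tag list or DTB
 *	jump_to_kernel_entry();
 */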
  947 #endif
  948 
  949 #if defined(FREEBSD_BOOT_LOADER)
  950 vm_offset_t
  951 freebsd_parse_boot_param(struct arm_boot_params *abp)
  952 {
  953         vm_offset_t lastaddr = 0;
  954         void *mdp;
  955         void *kmdp;
  956 
  957         /*
  958          * Mask the metadata pointer: it is supposed to be on a page
  959          * boundary.  If the first argument (mdp) doesn't point to a valid
  960          * address, the bootloader must have passed us something other than
  961          * the metadata pointer, so we give up.  Also give up if we cannot
  962          * find the metadata section the loader creates, which holds all this data.
  963          */
  964 
  965         if ((mdp = (void *)(abp->abp_r0 & ~PAGE_MASK)) == NULL)
  966                 return 0;
  967         preload_metadata = mdp;
  968         kmdp = preload_search_by_type("elf kernel");
  969         if (kmdp == NULL)
  970                 return 0;
  971 
  972         boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
  973         kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
  974         lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
  975 #ifdef DDB
  976         ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
  977         ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
  978 #endif
  979         preload_addr_relocate = KERNVIRTADDR - KERNPHYSADDR;
  980         return lastaddr;
  981 }
  982 #endif
  983 
  984 vm_offset_t
  985 default_parse_boot_param(struct arm_boot_params *abp)
  986 {
  987         vm_offset_t lastaddr;
  988 
  989 #if defined(LINUX_BOOT_ABI)
  990         if ((lastaddr = linux_parse_boot_param(abp)) != 0)
  991                 return lastaddr;
  992 #endif
  993 #if defined(FREEBSD_BOOT_LOADER)
  994         if ((lastaddr = freebsd_parse_boot_param(abp)) != 0)
  995                 return lastaddr;
  996 #endif
  997         /* Fall back to hardcoded metadata. */
  998         lastaddr = fake_preload_metadata(abp);
  999 
 1000         return lastaddr;
 1001 }
 1002 
 1003 /*
 1004  * Stub version of the boot parameter parsing routine.  We are
 1005  * called early in initarm, before even VM has been initialized.
 1006  * This routine needs to preserve any data that the boot loader
 1007  * has passed in before the kernel starts to grow past the end
 1008  * of the BSS, traditionally the place boot-loaders put this data.
 1009  *
 1010  * Since this is called so early, things that depend on the vm system
 1011  * being setup (including access to some SoC's serial ports), about
 1012  * all that can be done in this routine is to copy the arguments.
 1013  *
 1014  * This is the default boot parameter parsing routine.  Individual
 1015  * kernels/boards can override this weak function with one of their
 1016  * own.  We just fake metadata...
 1017  */
 1018 __weak_reference(default_parse_boot_param, parse_boot_param);
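/*
 * A hypothetical board file could override the weak default like this
 * (illustrative only; any strong definition of the symbol wins):
 *
 *	vm_offset_t
 *	parse_boot_param(struct arm_boot_params *abp)
 *	{
 *		// stash abp->abp_r0..abp_r3 somewhere safe, then:
 *		return (fake_preload_metadata(abp));
 *	}
 */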
 1019 
 1020 /*
 1021  * Initialize proc0
 1022  */
 1023 void
 1024 init_proc0(vm_offset_t kstack)
 1025 {
 1026         proc_linkup0(&proc0, &thread0);
 1027         thread0.td_kstack = kstack;
 1028         thread0.td_pcb = (struct pcb *)
 1029                 (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
 1030         thread0.td_pcb->pcb_flags = 0;
 1031         thread0.td_frame = &proc0_tf;
 1032         pcpup->pc_curpcb = thread0.td_pcb;
 1033 }
 1034 
 1035 void
 1036 set_stackptrs(int cpu)
 1037 {
 1038 
 1039         set_stackptr(PSR_IRQ32_MODE,
 1040             irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
 1041         set_stackptr(PSR_ABT32_MODE,
 1042             abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
 1043         set_stackptr(PSR_UND32_MODE,
 1044             undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
 1045 }
 1046 
 1047 #ifdef FDT
 1048 static char *
 1049 kenv_next(char *cp)
 1050 {
 1051 
 1052         if (cp != NULL) {
 1053                 while (*cp != 0)
 1054                         cp++;
 1055                 cp++;
 1056                 if (*cp == 0)
 1057                         cp = NULL;
 1058         }
 1059         return (cp);
 1060 }
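/*
 * Example of the layout kenv_next() walks (names made up for
 * illustration): the static kenv is a pack of NUL-terminated strings
 * ending with an empty string, e.g.
 *
 *	"hw.model=imx6\0vfs.root.mountfrom=ufs:mmcsd0a\0\0"
 *
 * Each call advances past one terminator, and NULL is returned at the
 * final double NUL.
 */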
 1061 
 1062 static void
 1063 print_kenv(void)
 1064 {
 1065         char *cp;
 1066 
 1067 
 1068         debugf("loader passed (static) kenv:\n");
 1069         if (kern_envp == NULL) {
 1070                 debugf(" no env, null ptr\n");
 1071                 return;
 1072         }
 1073         debugf(" kern_envp = 0x%08x\n", (uint32_t)kern_envp);
 1074 
 1075 
 1076         for (cp = kern_envp; cp != NULL; cp = kenv_next(cp))
 1077                 debugf(" %x %s\n", (uint32_t)cp, cp);
 1078 }
 1079 
 1080 static void
 1081 physmap_init(struct mem_region *availmem_regions, int availmem_regions_sz)
 1082 {
 1083         int i, j, cnt;
 1084         vm_offset_t phys_kernelend, kernload;
 1085         uint32_t s, e, sz;
 1086         struct mem_region *mp, *mp1;
 1087 
 1088         phys_kernelend = KERNPHYSADDR + (virtual_avail - KERNVIRTADDR);
 1089         kernload = KERNPHYSADDR;
 1090 
 1091         /*
 1092          * Remove kernel physical address range from avail
 1093          * regions list. Page align all regions.
 1094          * Non-page aligned memory isn't very interesting to us.
 1095          * Also, sort the entries for ascending addresses.
 1096          */
 1097         sz = 0;
 1098         cnt = availmem_regions_sz;
 1099         debugf("processing avail regions:\n");
 1100         for (mp = availmem_regions; mp->mr_size; mp++) {
 1101                 s = mp->mr_start;
 1102                 e = mp->mr_start + mp->mr_size;
 1103                 debugf(" %08x-%08x -> ", s, e);
 1104                 /* Check whether this region holds all of the kernel. */
 1105                 if (s < kernload && e > phys_kernelend) {
 1106                         availmem_regions[cnt].mr_start = phys_kernelend;
 1107                         availmem_regions[cnt++].mr_size = e - phys_kernelend;
 1108                         e = kernload;
 1109                 }
 1110                 /* Look whether this region starts within the kernel. */
 1111                 if (s >= kernload && s < phys_kernelend) {
 1112                         if (e <= phys_kernelend)
 1113                                 goto empty;
 1114                         s = phys_kernelend;
 1115                 }
 1116                 /* Now look whether this region ends within the kernel. */
 1117                 if (e > kernload && e <= phys_kernelend) {
 1118                         if (s >= kernload) {
 1119                                 goto empty;
 1120                         }
 1121                         e = kernload;
 1122                 }
 1123                 /* Now page align the start and size of the region. */
 1124                 s = round_page(s);
 1125                 e = trunc_page(e);
 1126                 if (e < s)
 1127                         e = s;
 1128                 sz = e - s;
 1129                 debugf("%08x-%08x = %x\n", s, e, sz);
 1130 
 1131                 /* Check whether some memory is left here. */
 1132                 if (sz == 0) {
 1133                 empty:
 1134                         debugf("skipping\n");
 1135                         bcopy(mp + 1, mp,
 1136                             (cnt - (mp - availmem_regions)) * sizeof(*mp));
 1137                         cnt--;
 1138                         mp--;
 1139                         continue;
 1140                 }
 1141 
 1142                 /* Do an insertion sort. */
 1143                 for (mp1 = availmem_regions; mp1 < mp; mp1++)
 1144                         if (s < mp1->mr_start)
 1145                                 break;
 1146                 if (mp1 < mp) {
 1147                         bcopy(mp1, mp1 + 1, (char *)mp - (char *)mp1);
 1148                         mp1->mr_start = s;
 1149                         mp1->mr_size = sz;
 1150                 } else {
 1151                         mp->mr_start = s;
 1152                         mp->mr_size = sz;
 1153                 }
 1154         }
 1155         availmem_regions_sz = cnt;
 1156 
 1157         /* Fill in phys_avail table, based on availmem_regions */
 1158         debugf("fill in phys_avail:\n");
 1159         for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {
 1160 
 1161                 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
 1162                     availmem_regions[i].mr_start,
 1163                     availmem_regions[i].mr_start + availmem_regions[i].mr_size,
 1164                     availmem_regions[i].mr_size);
 1165 
 1166                 /*
 1167                  * We should not map the page at PA 0x00000000, the VM can't
 1168                  * handle it, as pmap_extract() == 0 means failure.
 1169                  */
 1170                 if (availmem_regions[i].mr_start > 0 ||
 1171                     availmem_regions[i].mr_size > PAGE_SIZE) {
 1172                         phys_avail[j] = availmem_regions[i].mr_start;
 1173                         if (phys_avail[j] == 0)
 1174                                 phys_avail[j] += PAGE_SIZE;
 1175                         phys_avail[j + 1] = availmem_regions[i].mr_start +
 1176                             availmem_regions[i].mr_size;
 1177                 } else
 1178                         j -= 2;
 1179         }
 1180         phys_avail[j] = 0;
 1181         phys_avail[j + 1] = 0;
 1182 }
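/*
 * Worked example with made-up numbers: given a single available region
 * 0x00000000-0x20000000 and the kernel occupying 0x00200000-0x00500000,
 * the loop above splits the region around the kernel, and the
 * phys_avail fill skips physical page zero, yielding
 *
 *	phys_avail[] = { PAGE_SIZE, 0x00200000,
 *	                 0x00500000, 0x20000000, 0, 0 };
 */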
 1183 
 1184 void *
 1185 initarm(struct arm_boot_params *abp)
 1186 {
 1187         struct mem_region memory_regions[FDT_MEM_REGIONS];
 1188         struct mem_region availmem_regions[FDT_MEM_REGIONS];
 1189         struct mem_region reserved_regions[FDT_MEM_REGIONS];
 1190         struct pv_addr kernel_l1pt;
 1191         struct pv_addr dpcpu;
 1192         vm_offset_t dtbp, freemempos, l2_start, lastaddr;
 1193         uint32_t memsize, l2size;
 1194         char *env;
 1195         void *kmdp;
 1196         u_int l1pagetable;
 1197         int i = 0, j = 0, err_devmap = 0;
 1198         int memory_regions_sz;
 1199         int availmem_regions_sz;
 1200         int reserved_regions_sz;
 1201         vm_offset_t start, end;
 1202         vm_offset_t rstart, rend;
 1203         int curr;
 1204 
 1205         lastaddr = parse_boot_param(abp);
 1206         memsize = 0;
 1207         set_cpufuncs();
 1208 
 1209         /*
 1210          * Find the dtb passed in by the boot loader.
 1211          */
 1212         kmdp = preload_search_by_type("elf kernel");
 1213         if (kmdp != NULL)
 1214                 dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
 1215         else
 1216                 dtbp = (vm_offset_t)NULL;
 1217 
 1218 #if defined(FDT_DTB_STATIC)
 1219         /*
 1220          * In case the device tree blob was not retrieved (from metadata) try
 1221          * to use the statically embedded one.
 1222          */
 1223         if (dtbp == (vm_offset_t)NULL)
 1224                 dtbp = (vm_offset_t)&fdt_static_dtb;
 1225 #endif
 1226 
 1227         if (OF_install(OFW_FDT, 0) == FALSE)
 1228                 while (1);
 1229 
 1230         if (OF_init((void *)dtbp) != 0)
 1231                 while (1);
 1232 
 1233         /* Grab physical memory regions information from device tree. */
 1234         if (fdt_get_mem_regions(memory_regions, &memory_regions_sz,
 1235             &memsize) != 0)
 1236                 while(1);
 1237 
 1238         /* Grab reserved memory regions information from device tree. */
 1239         if (fdt_get_reserved_regions(reserved_regions, &reserved_regions_sz) != 0)
 1240                 reserved_regions_sz = 0;
 1241                 
 1242         /*
 1243          * Now exclude all the reserved regions
 1244          */
 1245         curr = 0;
 1246         for (i = 0; i < memory_regions_sz; i++) {
 1247                 start = memory_regions[i].mr_start;
 1248                 end = start + memory_regions[i].mr_size;
 1249                 for (j = 0; j < reserved_regions_sz; j++) {
 1250                         rstart = reserved_regions[j].mr_start;
 1251                         rend = rstart + reserved_regions[j].mr_size;
 1252                         /*
 1253                          * Reserved region ends before the available
 1254                          * region; skip it.
 1255                          */
 1256                         if (rend <= start)
 1257                                 continue;
 1258                         /*
 1259                          * Reserved region starts beyond the available
 1260                          * region; no further processing required.
 1261                          */
 1262                         if (rstart >= end)
 1263                                 break;
 1264                         /*
 1265                          * Reserved region covers the entire available
 1266                          * region; skip it.
 1267                          */
 1268                         if ((start >= rstart) && (rend >= end)) {
 1269                                 start = rend;
 1270                                 end = rend;
 1271                                 break;
 1272                         }
 1273                         /*
 1274                          * Available region fully contains the reserved region
 1275                          */
 1276                         if ((rstart > start) && (end > rend)) {
 1277                                 availmem_regions[curr].mr_start = start;
 1278                                 availmem_regions[curr++].mr_size = rstart - start;
 1279                                 start = rend;
 1280                                 break;
 1281                         }
 1282                         /*
 1283                          * Available region partially overlaps the reserved one
 1284                          */
 1285                         if ((rstart >= start) && (rstart <= end)) {
 1286                                 end = rstart;
 1287                         }
 1288                         else if ((rend >= start) && (rend <= end)) {
 1289                                 start = rend;
 1290                         }
 1291                 }
 1292 
 1293                 if (end > start) {
 1294                         availmem_regions[curr].mr_start = start;
 1295                         availmem_regions[curr++].mr_size = end - start;
 1296                 }
 1297         }
 1298 
 1299         availmem_regions_sz = curr;
 1300 
 1301         /* Platform-specific initialisation */
 1302         vm_max_kernel_address = initarm_lastaddr();
 1303 
 1304         pcpu0_init();
 1305 
 1306         /* Do basic tuning, hz etc */
 1307         init_param1();
 1308 
 1309         /* Calculate number of L2 tables needed for mapping vm_page_array */
 1310         l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
 1311         l2size = (l2size >> L1_S_SHIFT) + 1;
 1312 
 1313         /*
 1314          * Add one table for end of kernel map, one for stacks, msgbuf and
 1315          * L1 and L2 tables map and one for vectors map.
 1316          */
 1317         l2size += 3;
 1318 
 1319         /* Make it divisible by 4 */
 1320         l2size = (l2size + 3) & ~3;
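	/*
	 * Worked example with illustrative numbers: 512MB of RAM is
	 * 131072 pages; at roughly 100 bytes per struct vm_page (an
	 * assumption for the example, not a constant from this file)
	 * that is ~12.5MB of vm_page_array, which the shift-plus-one
	 * above turns into 13 L2 tables; adding 3 gives 16, already a
	 * multiple of 4.
	 */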
 1321 
 1322         freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;
 1323 
 1324         /* Define a macro to simplify memory allocation */
 1325 #define valloc_pages(var, np)                                           \
 1326         alloc_pages((var).pv_va, (np));                                 \
 1327         (var).pv_pa = (var).pv_va + (KERNPHYSADDR - KERNVIRTADDR);
 1328 
 1329 #define alloc_pages(var, np)                                            \
 1330         (var) = freemempos;                                             \
 1331         freemempos += (np * PAGE_SIZE);                                 \
 1332         memset((char *)(var), 0, ((np) * PAGE_SIZE));
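	/*
	 * Example expansion (illustrative): valloc_pages(systempage, 1)
	 * sets systempage.pv_va to the current freemempos, advances
	 * freemempos by one zeroed page, and derives pv_pa using the
	 * constant KERNPHYSADDR - KERNVIRTADDR virtual-to-physical
	 * offset.
	 */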
 1333 
 1334         while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
 1335                 freemempos += PAGE_SIZE;
 1336         valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
 1337 
 1338         for (i = 0; i < l2size; ++i) {
 1339                 if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
 1340                         valloc_pages(kernel_pt_table[i],
 1341                             L2_TABLE_SIZE / PAGE_SIZE);
 1342                         j = i;
 1343                 } else {
 1344                         kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
 1345                             L2_TABLE_SIZE_REAL * (i - j);
 1346                         kernel_pt_table[i].pv_pa =
 1347                             kernel_pt_table[i].pv_va - KERNVIRTADDR +
 1348                             KERNPHYSADDR;
 1349 
 1350                 }
 1351         }
 1352         /*
 1353          * Allocate a page for the system page mapped to 0x00000000
 1354          * or 0xffff0000. This page will just contain the system vectors
 1355          * and can be shared by all processes.
 1356          */
 1357         valloc_pages(systempage, 1);
 1358 
 1359         /* Allocate dynamic per-cpu area. */
 1360         valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
 1361         dpcpu_init((void *)dpcpu.pv_va, 0);
 1362 
 1363         /* Allocate stacks for all modes */
 1364         valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
 1365         valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
 1366         valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
 1367         valloc_pages(kernelstack, KSTACK_PAGES * MAXCPU);
 1368         valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
 1369 
 1370         /*
 1371          * Now we start construction of the L1 page table
 1372          * We start by mapping the L2 page tables into the L1.
 1373          * This means that we can replace L1 mappings later on if necessary
 1374          */
 1375         l1pagetable = kernel_l1pt.pv_va;
 1376 
 1377         /*
 1378          * Try to map as much as possible of kernel text and data using
 1379          * 1MB section mapping and for the rest of initial kernel address
 1380          * space use L2 coarse tables.
 1381          *
 1382          * Link L2 tables for mapping remainder of kernel (modulo 1MB)
 1383          * and kernel structures
 1384          */
 1385         l2_start = lastaddr & ~(L1_S_OFFSET);
 1386         for (i = 0 ; i < l2size - 1; i++)
 1387                 pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
 1388                     &kernel_pt_table[i]);
 1389 
 1390         pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;
 1391 
 1392         /* Map kernel code and data */
 1393         pmap_map_chunk(l1pagetable, KERNVIRTADDR, KERNPHYSADDR,
 1394            (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
 1395             VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 1396 
 1397 
 1398         /* Map L1 directory and allocated L2 page tables */
 1399         pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
 1400             L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
 1401 
 1402         pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
 1403             kernel_pt_table[0].pv_pa,
 1404             L2_TABLE_SIZE_REAL * l2size,
 1405             VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
 1406 
 1407         /* Map allocated DPCPU, stacks and msgbuf */
 1408         pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
 1409             freemempos - dpcpu.pv_va,
 1410             VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 1411 
 1412         /* Link and map the vector page */
 1413         pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
 1414             &kernel_pt_table[l2size - 1]);
 1415         pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
 1416             VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);
 1417 
 1418         /* Map pmap_devmap[] entries */
 1419         err_devmap = platform_devmap_init();
 1420         pmap_devmap_bootstrap(l1pagetable, pmap_devmap_bootstrap_table);
 1421 
 1422         cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT);
 1423         pmap_pa = kernel_l1pt.pv_pa;
 1424         setttb(kernel_l1pt.pv_pa);
 1425         cpu_tlb_flushID();
 1426         cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));
 1427 
 1428         /*
 1429          * Only after the SoC register block is mapped can we perform device
 1430          * tree fixups, as they may attempt to read parameters from hardware.
 1431          */
 1432         OF_interpret("perform-fixup", 0);
 1433 
 1434         initarm_gpio_init();
 1435 
 1436         cninit();
 1437 
 1438         physmem = memsize / PAGE_SIZE;
 1439 
 1440         debugf("initarm: console initialized\n");
 1441         debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
 1442         debugf(" boothowto = 0x%08x\n", boothowto);
 1443         debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
 1444         print_kenv();
 1445 
 1446         env = getenv("kernelname");
 1447         if (env != NULL)
 1448                 strlcpy(kernelname, env, sizeof(kernelname));
 1449 
 1450         if (err_devmap != 0)
 1451                 printf("WARNING: could not fully configure devmap, error=%d\n",
 1452                     err_devmap);
 1453 
 1454         initarm_late_init();
 1455 
 1456         /*
 1457          * Pages were allocated during the secondary bootstrap for the
 1458          * stacks for different CPU modes.
 1459          * We must now set the r13 registers in the different CPU modes to
 1460          * point to these stacks.
 1461          * Since the ARM stacks use STMFD etc. we must set r13 to the top end
 1462          * of the stack memory.
 1463          */
 1464         cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);
 1465 
 1466         set_stackptrs(0);
 1467 
 1468         /*
 1469          * We must now clean the cache again.
 1470          * Cleaning may be done by reading new data to displace any
 1471          * dirty data in the cache.  This will have happened in setttb(),
 1472          * but since we are bootstrapping, the addresses used for the read
 1473          * may have just been remapped, and thus the cache could be out
 1474          * of sync.  A re-clean after the switch will cure this.
 1475          * After booting there are no gross relocations of the kernel, so
 1476          * this problem will not occur after initarm().
 1477          */
 1478         cpu_idcache_wbinv_all();
 1479 
 1480         /* Set stack for exception handlers */
 1481         data_abort_handler_address = (u_int)data_abort_handler;
 1482         prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
 1483         undefined_handler_address = (u_int)undefinedinstruction_bounce;
 1484         undefined_init();
 1485 
 1486         init_proc0(kernelstack.pv_va);
 1487 
 1488         arm_intrnames_init();
 1489         arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
 1490         arm_dump_avail_init(memsize, sizeof(dump_avail) / sizeof(dump_avail[0]));
 1491         pmap_bootstrap(freemempos, &kernel_l1pt);
 1492         msgbufp = (void *)msgbufpv.pv_va;
 1493         msgbufinit(msgbufp, msgbufsize);
 1494         mutex_init();
 1495 
 1496         /*
 1497          * Prepare map of physical memory regions available to vm subsystem.
 1498          */
 1499         physmap_init(availmem_regions, availmem_regions_sz);
 1500 
 1501         init_param2(physmem);
 1502         kdb_init();
 1503 
 1504         return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
 1505             sizeof(struct pcb)));
 1506 }
 1507 #endif
