


FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/machdep.c


/*      $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $        */

/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Mark Brinicombe
 *      for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup
 *
 * Created      : 17/09/94
 * Updated      : 18/04/01 updated for new wscons
 */

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"
#include "opt_sched.h"
#include "opt_timer.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.1/sys/arm/arm/machdep.c 317005 2017-04-16 07:33:47Z mmel $");

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/devmap.h>
#include <sys/efi.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/debug_monitor.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/pcb.h>
#include <machine/physmem.h>
#include <machine/platform.h>
#include <machine/sysarch.h>
#include <machine/undefined.h>
#include <machine/vfp.h>
#include <machine/vmparam.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <machine/ofw_machdep.h>
#endif

#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) || \
    defined(COMPAT_FREEBSD9)
#error FreeBSD/arm doesn't provide compatibility with releases prior to 10
#endif

struct pcpu __pcpu[MAXCPU];
struct pcpu *pcpup = &__pcpu[0];

static struct trapframe proc0_tf;
uint32_t cpu_reset_address = 0;
int cold = 1;
vm_offset_t vector_page;

int (*_arm_memcpy)(void *, void *, int, int) = NULL;
int (*_arm_bzero)(void *, int, int) = NULL;
int _min_memcpy_size = 0;
int _min_bzero_size = 0;

extern int *end;

#ifdef FDT
vm_paddr_t pmap_pa;
#if __ARM_ARCH >= 6
vm_offset_t systempage;
vm_offset_t irqstack;
vm_offset_t undstack;
vm_offset_t abtstack;
#else
/*
 * This is the number of L2 page tables required to cover the maximum
 * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
 * stacks, etc.), rounded up to be divisible by 4.
 */
#define KERNEL_PT_MAX   78
static struct pv_addr kernel_pt_table[KERNEL_PT_MAX];
struct pv_addr systempage;
static struct pv_addr msgbufpv;
struct pv_addr irqstack;
struct pv_addr undstack;
struct pv_addr abtstack;
static struct pv_addr kernelstack;
#endif /* __ARM_ARCH >= 6 */
#endif /* FDT */

#ifdef MULTIDELAY
static delay_func *delay_impl;
static void *delay_arg;
#endif

struct kva_md_info kmi;

/*
 * arm_vector_init:
 *
 *      Initialize the vector page, and select whether or not to
 *      relocate the vectors.
 *
 *      NOTE: The vector page must already be mapped at its final
 *      destination before this is called.
 */

extern unsigned int page0[], page0_data[];
void
arm_vector_init(vm_offset_t va, int which)
{
        unsigned int *vectors = (unsigned int *)va;
        unsigned int *vectors_data = vectors + (page0_data - page0);
        int vec;

        /*
         * Loop through the vectors we're taking over, and copy the
         * vector's insn and data word.
         */
        for (vec = 0; vec < ARM_NVEC; vec++) {
                if ((which & (1 << vec)) == 0) {
                        /* Don't want to take over this vector. */
                        continue;
                }
                vectors[vec] = page0[vec];
                vectors_data[vec] = page0_data[vec];
        }

        /* Now sync the vectors. */
        icache_sync(va, (ARM_NVEC * 2) * sizeof(u_int));

        vector_page = va;
#if __ARM_ARCH < 6
        if (va == ARM_VECTORS_HIGH) {
                /*
                 * Enable high vectors in the system control reg (SCTLR).
                 *
                 * Assume the MD caller knows what it's doing here, and really
                 * does want the vector page relocated.
                 *
                 * Note: This has to be done here (and not just in
                 * cpu_setup()) because the vector page needs to be
                 * accessible *before* cpu_startup() is called.
                 * Think ddb(9) ...
                 */
                cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
        }
#endif
}
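
/*
 * Typical usage, as done by initarm() below: take over all vectors and
 * place the vector page at the high vectors address:
 *
 *      arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
 */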

static void
cpu_startup(void *dummy)
{
        struct pcb *pcb = thread0.td_pcb;
        const unsigned int mbyte = 1024 * 1024;
#if __ARM_ARCH < 6 && !defined(ARM_CACHE_LOCK_ENABLE)
        vm_page_t m;
#endif

        identify_arm_cpu();

        vm_ksubmap_init(&kmi);

        /*
         * Display the RAM layout.
         */
        printf("real memory  = %ju (%ju MB)\n",
            (uintmax_t)arm32_ptob(realmem),
            (uintmax_t)arm32_ptob(realmem) / mbyte);
        printf("avail memory = %ju (%ju MB)\n",
            (uintmax_t)arm32_ptob(vm_cnt.v_free_count),
            (uintmax_t)arm32_ptob(vm_cnt.v_free_count) / mbyte);
        if (bootverbose) {
                arm_physmem_print_tables();
                devmap_print_table();
        }

        bufinit();
        vm_pager_bufferinit();
        pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack +
            USPACE_SVC_STACK_TOP;
        pmap_set_pcb_pagedir(kernel_pmap, pcb);
#if __ARM_ARCH < 6
        vector_page_setprot(VM_PROT_READ);
        pmap_postinit();
#ifdef ARM_CACHE_LOCK_ENABLE
        pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
        arm_lock_cache_line(ARM_TP_ADDRESS);
#else
        m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
        pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
#endif
        *(uint32_t *)ARM_RAS_START = 0;
        *(uint32_t *)ARM_RAS_END = 0xffffffff;
#endif
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

        dcache_wb_poc((vm_offset_t)ptr, (vm_paddr_t)vtophys(ptr), len);
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

        return (ENXIO);
}

void
cpu_idle(int busy)
{

        CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu);
        spinlock_enter();
#ifndef NO_EVENTTIMERS
        if (!busy)
                cpu_idleclock();
#endif
        if (!sched_runnable())
                cpu_sleep(0);
#ifndef NO_EVENTTIMERS
        if (!busy)
                cpu_activeclock();
#endif
        spinlock_exit();
        CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu);
}

int
cpu_idle_wakeup(int cpu)
{

        return (0);
}

/*
 * Most ARM platforms don't need to do anything special to init their clocks
 * (they get initialized during normal device attachment), and by not defining
 * a cpu_initclocks() function they get this generic one.  Any platform that
 * needs to do something special can provide its own implementation, which
 * will override this one due to the weak linkage.
 */
void
arm_generic_initclocks(void)
{

#ifndef NO_EVENTTIMERS
#ifdef SMP
        if (PCPU_GET(cpuid) == 0)
                cpu_initclocks_bsp();
        else
                cpu_initclocks_ap();
#else
        cpu_initclocks_bsp();
#endif
#endif
}
__weak_reference(arm_generic_initclocks, cpu_initclocks);
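
/*
 * Example (illustrative sketch, not part of this file): a platform that
 * needs special clock setup can define its own strong cpu_initclocks(),
 * which takes precedence over the weak reference above.  The platform
 * hook named here is hypothetical:
 *
 *      void
 *      cpu_initclocks(void)
 *      {
 *              my_soc_timer_start();   (hypothetical platform setup)
 *      }
 */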

#ifdef MULTIDELAY
void
arm_set_delay(delay_func *impl, void *arg)
{

        KASSERT(impl != NULL, ("No DELAY implementation"));
        delay_impl = impl;
        delay_arg = arg;
}

void
DELAY(int usec)
{

        delay_impl(usec, delay_arg);
}
#endif
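
/*
 * Example (illustrative sketch): a platform timer driver registers its
 * delay routine during early attach, after which DELAY() works.  The
 * driver function and softc below are hypothetical:
 *
 *      static void
 *      my_timer_delay(int usec, void *arg)
 *      {
 *              struct my_timer_softc *sc = arg;
 *
 *              (spin on the timer's free-running counter for usec)
 *      }
 *
 *      arm_set_delay(my_timer_delay, sc);
 *
 * Note that DELAY() above calls through delay_impl unconditionally, so a
 * kernel built with MULTIDELAY must register an implementation before the
 * first DELAY() call.
 */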

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}

void
spinlock_enter(void)
{
        struct thread *td;
        register_t cspr;

        td = curthread;
        if (td->td_md.md_spinlock_count == 0) {
                cspr = disable_interrupts(PSR_I | PSR_F);
                td->td_md.md_spinlock_count = 1;
                td->td_md.md_saved_cspr = cspr;
        } else
                td->td_md.md_spinlock_count++;
        critical_enter();
}

void
spinlock_exit(void)
{
        struct thread *td;
        register_t cspr;

        td = curthread;
        critical_exit();
        cspr = td->td_md.md_saved_cspr;
        td->td_md.md_spinlock_count--;
        if (td->td_md.md_spinlock_count == 0)
                restore_interrupts(cspr);
}
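
/*
 * Note that spinlock_enter()/spinlock_exit() nest: interrupts are disabled
 * and the PSR saved only by the outermost enter, and restored only by the
 * matching outermost exit.  For example:
 *
 *      spinlock_enter();       (count 0 -> 1, interrupts disabled)
 *      spinlock_enter();       (count 1 -> 2, no PSR change)
 *      spinlock_exit();        (count 2 -> 1, interrupts stay disabled)
 *      spinlock_exit();        (count 1 -> 0, saved PSR restored)
 */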

/*
 * Clear registers on exec
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
        struct trapframe *tf = td->td_frame;

        memset(tf, 0, sizeof(*tf));
        tf->tf_usr_sp = stack;
        tf->tf_usr_lr = imgp->entry_addr;
        tf->tf_svc_lr = 0x77777777;
        tf->tf_pc = imgp->entry_addr;
        tf->tf_spsr = PSR_USR32_MODE;
}


#ifdef VFP
/*
 * Get machine VFP context.
 */
static void
get_vfpcontext(struct thread *td, mcontext_vfp_t *vfp)
{
        struct pcb *curpcb;

        curpcb = curthread->td_pcb;
        critical_enter();

        vfp_store(&curpcb->pcb_vfpstate, false);
        memcpy(vfp->mcv_reg, curpcb->pcb_vfpstate.reg,
            sizeof(vfp->mcv_reg));
        vfp->mcv_fpscr = curpcb->pcb_vfpstate.fpscr;

        critical_exit();
}

/*
 * Set machine VFP context.
 */
static void
set_vfpcontext(struct thread *td, mcontext_vfp_t *vfp)
{
        struct pcb *curpcb;

        curpcb = curthread->td_pcb;
        critical_enter();

        vfp_discard(td);
        memcpy(curpcb->pcb_vfpstate.reg, vfp->mcv_reg,
            sizeof(curpcb->pcb_vfpstate.reg));
        curpcb->pcb_vfpstate.fpscr = vfp->mcv_fpscr;

        critical_exit();
}
#endif
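
/*
 * The critical sections above prevent the thread from being switched out
 * between touching the live VFP registers (vfp_store()/vfp_discard()) and
 * copying the state to or from the pcb; a context switch in between could
 * otherwise interleave the VFP state of two threads.
 */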

/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
        struct trapframe *tf = td->td_frame;
        __greg_t *gr = mcp->__gregs;

        if (clear_ret & GET_MC_CLEAR_RET) {
                gr[_REG_R0] = 0;
                gr[_REG_CPSR] = tf->tf_spsr & ~PSR_C;
        } else {
                gr[_REG_R0]   = tf->tf_r0;
                gr[_REG_CPSR] = tf->tf_spsr;
        }
        gr[_REG_R1]   = tf->tf_r1;
        gr[_REG_R2]   = tf->tf_r2;
        gr[_REG_R3]   = tf->tf_r3;
        gr[_REG_R4]   = tf->tf_r4;
        gr[_REG_R5]   = tf->tf_r5;
        gr[_REG_R6]   = tf->tf_r6;
        gr[_REG_R7]   = tf->tf_r7;
        gr[_REG_R8]   = tf->tf_r8;
        gr[_REG_R9]   = tf->tf_r9;
        gr[_REG_R10]  = tf->tf_r10;
        gr[_REG_R11]  = tf->tf_r11;
        gr[_REG_R12]  = tf->tf_r12;
        gr[_REG_SP]   = tf->tf_usr_sp;
        gr[_REG_LR]   = tf->tf_usr_lr;
        gr[_REG_PC]   = tf->tf_pc;

        mcp->mc_vfp_size = 0;
        mcp->mc_vfp_ptr = NULL;
        memset(&mcp->mc_spare, 0, sizeof(mcp->mc_spare));

        return (0);
}

/*
 * Set machine context.
 *
 * However, we don't set any but the user modifiable flags, and we won't
 * touch any privileged processor state.
 */
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
        mcontext_vfp_t mc_vfp, *vfp;
        struct trapframe *tf = td->td_frame;
        const __greg_t *gr = mcp->__gregs;

#ifdef WITNESS
        if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_size != sizeof(mc_vfp)) {
                printf("%s: %s: Malformed mc_vfp_size: %d (0x%08X)\n",
                    td->td_proc->p_comm, __func__,
                    mcp->mc_vfp_size, mcp->mc_vfp_size);
        } else if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_ptr == NULL) {
                printf("%s: %s: mc_vfp_size != 0 but mc_vfp_ptr == NULL\n",
                    td->td_proc->p_comm, __func__);
        }
#endif

        if (mcp->mc_vfp_size == sizeof(mc_vfp) && mcp->mc_vfp_ptr != NULL) {
                if (copyin(mcp->mc_vfp_ptr, &mc_vfp, sizeof(mc_vfp)) != 0)
                        return (EFAULT);
                vfp = &mc_vfp;
        } else {
                vfp = NULL;
        }

        tf->tf_r0 = gr[_REG_R0];
        tf->tf_r1 = gr[_REG_R1];
        tf->tf_r2 = gr[_REG_R2];
        tf->tf_r3 = gr[_REG_R3];
        tf->tf_r4 = gr[_REG_R4];
        tf->tf_r5 = gr[_REG_R5];
        tf->tf_r6 = gr[_REG_R6];
        tf->tf_r7 = gr[_REG_R7];
        tf->tf_r8 = gr[_REG_R8];
        tf->tf_r9 = gr[_REG_R9];
        tf->tf_r10 = gr[_REG_R10];
        tf->tf_r11 = gr[_REG_R11];
        tf->tf_r12 = gr[_REG_R12];
        tf->tf_usr_sp = gr[_REG_SP];
        tf->tf_usr_lr = gr[_REG_LR];
        tf->tf_pc = gr[_REG_PC];
        tf->tf_spsr = gr[_REG_CPSR];
#ifdef VFP
        if (vfp != NULL)
                set_vfpcontext(td, vfp);
#endif
        return (0);
}

void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
        struct thread *td;
        struct proc *p;
        struct trapframe *tf;
        struct sigframe *fp, frame;
        struct sigacts *psp;
        struct sysentvec *sysent;
        int onstack;
        int sig;
        int code;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        sig = ksi->ksi_signo;
        code = ksi->ksi_code;
        psp = p->p_sigacts;
        mtx_assert(&psp->ps_mtx, MA_OWNED);
        tf = td->td_frame;
        onstack = sigonstack(tf->tf_usr_sp);

        CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
            catcher, sig);

        /* Allocate and validate space for the signal handler context. */
        if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) &&
            SIGISMEMBER(psp->ps_sigonstack, sig)) {
                fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
                    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
                td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
        } else
                fp = (struct sigframe *)td->td_frame->tf_usr_sp;

        /* make room on the stack */
        fp--;

        /* make the stack aligned */
        fp = (struct sigframe *)STACKALIGN(fp);
        /* Populate the siginfo frame. */
        get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
#ifdef VFP
        get_vfpcontext(td, &frame.sf_vfp);
        frame.sf_uc.uc_mcontext.mc_vfp_size = sizeof(fp->sf_vfp);
        frame.sf_uc.uc_mcontext.mc_vfp_ptr = &fp->sf_vfp;
#else
        frame.sf_uc.uc_mcontext.mc_vfp_size = 0;
        frame.sf_uc.uc_mcontext.mc_vfp_ptr = NULL;
#endif
        frame.sf_si = ksi->ksi_info;
        frame.sf_uc.uc_sigmask = *mask;
        /* Copy the whole stack struct first, then set ss_flags on the copy. */
        frame.sf_uc.uc_stack = td->td_sigstk;
        frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
            ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
        mtx_unlock(&psp->ps_mtx);
        PROC_UNLOCK(td->td_proc);

        /* Copy the sigframe out to the user's stack. */
        if (copyout(&frame, fp, sizeof(*fp)) != 0) {
                /* Process has trashed its stack. Kill it. */
                CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
                PROC_LOCK(p);
                sigexit(td, SIGILL);
        }

        /*
         * Build context to run handler in.  We invoke the handler
         * directly, only returning via the trampoline.  Note the
         * trampoline version numbers are coordinated with machine-
         * dependent code in libc.
         */

        tf->tf_r0 = sig;
        tf->tf_r1 = (register_t)&fp->sf_si;
        tf->tf_r2 = (register_t)&fp->sf_uc;

        /* the trampoline uses r5 as the uc address */
        tf->tf_r5 = (register_t)&fp->sf_uc;
        tf->tf_pc = (register_t)catcher;
        tf->tf_usr_sp = (register_t)fp;
        sysent = p->p_sysent;
        if (sysent->sv_sigcode_base != 0)
                tf->tf_usr_lr = (register_t)sysent->sv_sigcode_base;
        else
                tf->tf_usr_lr = (register_t)(sysent->sv_psstrings -
                    *(sysent->sv_szsigcode));
        /* Set the mode to enter in the signal handler */
#if __ARM_ARCH >= 7
        if ((register_t)catcher & 1)
                tf->tf_spsr |= PSR_T;
        else
                tf->tf_spsr &= ~PSR_T;
#endif

        CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr,
            tf->tf_usr_sp);

        PROC_LOCK(p);
        mtx_lock(&psp->ps_mtx);
}

int
sys_sigreturn(struct thread *td, struct sigreturn_args /* {
        const struct __ucontext *sigcntxp;
} */ *uap)
{
        ucontext_t uc;
        int spsr;

        if (uap == NULL)
                return (EFAULT);
        if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
                return (EFAULT);
        /*
         * Make sure the processor mode has not been tampered with and
         * interrupts have not been disabled.
         */
        spsr = uc.uc_mcontext.__gregs[_REG_CPSR];
        if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
            (spsr & (PSR_I | PSR_F)) != 0)
                return (EINVAL);
        /* Restore register context. */
        set_mcontext(td, &uc.uc_mcontext);

        /* Restore signal mask. */
        kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

        return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
        pcb->pcb_regs.sf_r4 = tf->tf_r4;
        pcb->pcb_regs.sf_r5 = tf->tf_r5;
        pcb->pcb_regs.sf_r6 = tf->tf_r6;
        pcb->pcb_regs.sf_r7 = tf->tf_r7;
        pcb->pcb_regs.sf_r8 = tf->tf_r8;
        pcb->pcb_regs.sf_r9 = tf->tf_r9;
        pcb->pcb_regs.sf_r10 = tf->tf_r10;
        pcb->pcb_regs.sf_r11 = tf->tf_r11;
        pcb->pcb_regs.sf_r12 = tf->tf_r12;
        pcb->pcb_regs.sf_pc = tf->tf_pc;
        pcb->pcb_regs.sf_lr = tf->tf_usr_lr;
        pcb->pcb_regs.sf_sp = tf->tf_usr_sp;
}

void
pcpu0_init(void)
{
#if __ARM_ARCH >= 6
        set_curthread(&thread0);
#endif
        pcpu_init(pcpup, 0, sizeof(struct pcpu));
        PCPU_SET(curthread, &thread0);
}

/*
 * Initialize proc0
 */
void
init_proc0(vm_offset_t kstack)
{
        proc_linkup0(&proc0, &thread0);
        thread0.td_kstack = kstack;
        thread0.td_pcb = (struct pcb *)
                (thread0.td_kstack + kstack_pages * PAGE_SIZE) - 1;
        thread0.td_pcb->pcb_flags = 0;
        thread0.td_pcb->pcb_vfpcpu = -1;
        thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN;
        thread0.td_frame = &proc0_tf;
        pcpup->pc_curpcb = thread0.td_pcb;
}
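
/*
 * Layout note for the td_pcb assignment above: thread0's pcb occupies the
 * topmost sizeof(struct pcb) bytes of its kernel stack, with the usable
 * stack (which grows down) directly below it:
 *
 *      td_kstack                        td_kstack + kstack_pages * PAGE_SIZE
 *      |<-------- stack (grows down) --------->|<------ struct pcb ------>|
 */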

#if __ARM_ARCH >= 6
void
set_stackptrs(int cpu)
{

        set_stackptr(PSR_IRQ32_MODE,
            irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
        set_stackptr(PSR_ABT32_MODE,
            abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
        set_stackptr(PSR_UND32_MODE,
            undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}
#else
void
set_stackptrs(int cpu)
{

        set_stackptr(PSR_IRQ32_MODE,
            irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
        set_stackptr(PSR_ABT32_MODE,
            abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
        set_stackptr(PSR_UND32_MODE,
            undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}
#endif
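
/*
 * The "(cpu + 1)" above makes each mode's stack pointer for CPU n point at
 * the end (top) of that CPU's slice of the shared stack allocation; the
 * ARM full-descending stack (STMFD) decrements r13 before storing, so r13
 * must start at the top.  CPU 0's IRQ stack pointer, for instance, is
 * irqstack + IRQ_STACK_SIZE * PAGE_SIZE, the end of the first slice.
 */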


#ifdef FDT
#if __ARM_ARCH < 6
void *
initarm(struct arm_boot_params *abp)
{
        struct mem_region mem_regions[FDT_MEM_REGIONS];
        struct pv_addr kernel_l1pt;
        struct pv_addr dpcpu;
        vm_offset_t dtbp, freemempos, l2_start, lastaddr;
        uint64_t memsize;
        uint32_t l2size;
        char *env;
        void *kmdp;
        u_int l1pagetable;
        int i, j, err_devmap, mem_regions_sz;

        lastaddr = parse_boot_param(abp);
        arm_physmem_kernaddr = abp->abp_physaddr;

        memsize = 0;

        cpuinfo_init();
        set_cpufuncs();

        /*
         * Find the dtb passed in by the boot loader.
         */
        kmdp = preload_search_by_type("elf kernel");
        if (kmdp != NULL)
                dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
        else
                dtbp = (vm_offset_t)NULL;

#if defined(FDT_DTB_STATIC)
        /*
         * In case the device tree blob was not retrieved (from metadata) try
         * to use the statically embedded one.
         */
        if (dtbp == (vm_offset_t)NULL)
                dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

        if (OF_install(OFW_FDT, 0) == FALSE)
                panic("Cannot install FDT");

        if (OF_init((void *)dtbp) != 0)
                panic("OF_init failed with the found device tree");

        /* Grab physical memory regions information from device tree. */
        if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0)
                panic("Cannot get physical memory regions");
        arm_physmem_hardware_regions(mem_regions, mem_regions_sz);

        /* Grab reserved memory regions information from device tree. */
        if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
                arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
                    EXFLAG_NODUMP | EXFLAG_NOALLOC);

        /* Platform-specific initialisation */
        platform_probe_and_attach();

        pcpu0_init();

        /* Do basic tuning, hz etc */
        init_param1();

        /* Calculate number of L2 tables needed for mapping vm_page_array */
        l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
        l2size = (l2size >> L1_S_SHIFT) + 1;

        /*
         * Add one table for the end of the kernel map, one for the stacks,
         * msgbuf, and L1/L2 table maps, and one for the vectors map.
         */
        l2size += 3;

        /* Make it divisible by 4 */
        l2size = (l2size + 3) & ~3;

        freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;

        /* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)                                           \
        alloc_pages((var).pv_va, (np));                                 \
        (var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR);

#define alloc_pages(var, np)                                            \
        (var) = freemempos;                                             \
        freemempos += ((np) * PAGE_SIZE);                               \
        memset((char *)(var), 0, ((np) * PAGE_SIZE));
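
        /*
         * For reference, valloc_pages(var, np) carves np zeroed pages out of
         * the boot-time free memory at freemempos into var.pv_va, and derives
         * the physical address from the constant offset between the kernel's
         * load address (abp->abp_physaddr) and its link address (KERNVIRTADDR):
         *
         *      var.pv_pa == var.pv_va + (abp->abp_physaddr - KERNVIRTADDR)
         */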

        while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
                freemempos += PAGE_SIZE;
        valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);

        for (i = 0, j = 0; i < l2size; ++i) {
                if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
                        valloc_pages(kernel_pt_table[i],
                            L2_TABLE_SIZE / PAGE_SIZE);
                        j = i;
                } else {
                        kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
                            L2_TABLE_SIZE_REAL * (i - j);
                        kernel_pt_table[i].pv_pa =
                            kernel_pt_table[i].pv_va - KERNVIRTADDR +
                            abp->abp_physaddr;

                }
        }
        /*
         * Allocate a page for the system page mapped to 0x00000000
         * or 0xffff0000. This page will just contain the system vectors
         * and can be shared by all processes.
         */
        valloc_pages(systempage, 1);

        /* Allocate dynamic per-cpu area. */
        valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
        dpcpu_init((void *)dpcpu.pv_va, 0);

        /* Allocate stacks for all modes */
        valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
        valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
        valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
        valloc_pages(kernelstack, kstack_pages * MAXCPU);
        valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);

        /*
         * Now we start construction of the L1 page table.
         * We start by mapping the L2 page tables into the L1, which means
         * we can replace L1 mappings later on if necessary.
         */
        l1pagetable = kernel_l1pt.pv_va;

        /*
         * Try to map as much as possible of the kernel text and data using
         * 1MB section mappings, and use L2 coarse tables for the rest of
         * the initial kernel address space.
         *
         * Link L2 tables for mapping remainder of kernel (modulo 1MB)
         * and kernel structures
         */
        l2_start = lastaddr & ~(L1_S_OFFSET);
        for (i = 0 ; i < l2size - 1; i++)
                pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
                    &kernel_pt_table[i]);

        pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;

        /* Map kernel code and data */
        pmap_map_chunk(l1pagetable, KERNVIRTADDR, abp->abp_physaddr,
           (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
            VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

        /* Map L1 directory and allocated L2 page tables */
        pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
            L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

        pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
            kernel_pt_table[0].pv_pa,
            L2_TABLE_SIZE_REAL * l2size,
            VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

        /* Map allocated DPCPU, stacks and msgbuf */
        pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
            freemempos - dpcpu.pv_va,
            VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

        /* Link and map the vector page */
        pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
            &kernel_pt_table[l2size - 1]);
        pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
            VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);

        /* Establish static device mappings. */
        err_devmap = platform_devmap_init();
        devmap_bootstrap(l1pagetable, NULL);
        vm_max_kernel_address = platform_lastaddr();

        cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT);
        pmap_pa = kernel_l1pt.pv_pa;
        cpu_setttb(kernel_l1pt.pv_pa);
        cpu_tlb_flushID();
        cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));

        /*
         * Now that proper page tables are installed, call cpu_setup() to enable
         * instruction and data caches and other chip-specific features.
         */
        cpu_setup();

        /*
         * Only after the SoC register block is mapped can we perform device
         * tree fixups, as they may attempt to read parameters from hardware.
         */
        OF_interpret("perform-fixup", 0);

        platform_gpio_init();

        cninit();

        debugf("initarm: console initialized\n");
        debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
        debugf(" boothowto = 0x%08x\n", boothowto);
        debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
        arm_print_kenv();

        env = kern_getenv("kernelname");
        if (env != NULL) {
                strlcpy(kernelname, env, sizeof(kernelname));
                freeenv(env);
        }

        if (err_devmap != 0)
                printf("WARNING: could not fully configure devmap, error=%d\n",
                    err_devmap);

        platform_late_init();

        /*
         * Pages were allocated during the secondary bootstrap for the
         * stacks for different CPU modes.
         * We must now set the r13 registers in the different CPU modes to
         * point to these stacks.
         * Since the ARM stacks use STMFD etc. we must set r13 to the top end
         * of the stack memory.
         */
        cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);

        set_stackptrs(0);

        /*
         * We must now clean the cache again....
         * Cleaning may be done by reading new data to displace any
         * dirty data in the cache. This will have happened in cpu_setttb(),
         * but since we are bootstrapping, the addresses used for the read
         * may have just been remapped and thus the cache could be out
         * of sync. A re-clean after the switch will cure this.
         * After booting there are no gross relocations of the kernel, so
         * this problem will not occur after initarm().
         */
        cpu_idcache_wbinv_all();

        undefined_init();

        init_proc0(kernelstack.pv_va);

        arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
        pmap_bootstrap(freemempos, &kernel_l1pt);
        msgbufp = (void *)msgbufpv.pv_va;
        msgbufinit(msgbufp, msgbufsize);
        mutex_init();

        /*
         * Exclude the kernel (and all the things we allocated which immediately
         * follow the kernel) from the VM allocation pool but not from crash
         * dumps.  virtual_avail is a global variable which tracks the kva we've
         * "allocated" while setting up pmaps.
         *
         * Prepare the list of physical memory available to the vm subsystem.
         */
        arm_physmem_exclude_region(abp->abp_physaddr,
            (virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC);
        arm_physmem_init_kernel_globals();

        init_param2(physmem);
        dbg_monitor_init();
        kdb_init();

        return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
            sizeof(struct pcb)));
}
#else /* __ARM_ARCH < 6 */
void *
initarm(struct arm_boot_params *abp)
{
        struct mem_region mem_regions[FDT_MEM_REGIONS];
        vm_paddr_t lastaddr;
        vm_offset_t dtbp, kernelstack, dpcpu;
        char *env;
        void *kmdp;
        int err_devmap, mem_regions_sz;
#ifdef EFI
        struct efi_map_header *efihdr;
#endif

        /* get last allocated physical address */
        arm_physmem_kernaddr = abp->abp_physaddr;
        lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr;

        set_cpufuncs();
        cpuinfo_init();

        /*
         * Find the dtb passed in by the boot loader.
         */
        kmdp = preload_search_by_type("elf kernel");
        dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
        /*
         * In case the device tree blob was not retrieved (from metadata) try
         * to use the statically embedded one.
         */
        if (dtbp == (vm_offset_t)NULL)
                dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

        if (OF_install(OFW_FDT, 0) == FALSE)
                panic("Cannot install FDT");

        if (OF_init((void *)dtbp) != 0)
                panic("OF_init failed with the found device tree");

#if defined(LINUX_BOOT_ABI)
        arm_parse_fdt_bootargs();
#endif

#ifdef EFI
        efihdr = (struct efi_map_header *)preload_search_info(kmdp,
            MODINFO_METADATA | MODINFOMD_EFI_MAP);
        if (efihdr != NULL) {
                arm_add_efi_map_entries(efihdr, mem_regions, &mem_regions_sz);
        } else
#endif
        {
                /* Grab physical memory regions information from device tree. */
                if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, NULL) != 0)
                        panic("Cannot get physical memory regions");
        }
        arm_physmem_hardware_regions(mem_regions, mem_regions_sz);

        /* Grab reserved memory regions information from device tree. */
        if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
                arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
                    EXFLAG_NODUMP | EXFLAG_NOALLOC);

        /*
         * Set TEX remapping registers.
         * Set up kernel page tables and switch to the kernel L1 page table.
         */
        pmap_set_tex();
        pmap_bootstrap_prepare(lastaddr);

        /*
         * Now that proper page tables are installed, call cpu_setup() to enable
         * instruction and data caches and other chip-specific features.
         */
        cpu_setup();

        /* Platform-specific initialisation */
        platform_probe_and_attach();
        pcpu0_init();

        /* Do basic tuning, hz etc */
        init_param1();

        /*
         * Allocate a page for the system page mapped to 0xffff0000
         * This page will just contain the system vectors and can be
         * shared by all processes.
         */
        systempage = pmap_preboot_get_pages(1);

        /* Map the vector page. */
        pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH,  1);
        if (virtual_end >= ARM_VECTORS_HIGH)
                virtual_end = ARM_VECTORS_HIGH - 1;

        /* Allocate dynamic per-cpu area. */
        dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE);
        dpcpu_init((void *)dpcpu, 0);

        /* Allocate stacks for all modes */
        irqstack    = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU);
        abtstack    = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU);
        undstack    = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU);
        kernelstack = pmap_preboot_get_vpages(kstack_pages * MAXCPU);

        /* Allocate message buffer. */
        msgbufp = (void *)pmap_preboot_get_vpages(
            round_page(msgbufsize) / PAGE_SIZE);

        /*
         * Pages were allocated during the secondary bootstrap for the
         * stacks for different CPU modes.
         * We must now set the r13 registers in the different CPU modes to
         * point to these stacks.
         * Since the ARM stacks use STMFD etc. we must set r13 to the top end
         * of the stack memory.
         */
        set_stackptrs(0);
        mutex_init();

        /* Establish static device mappings. */
        err_devmap = platform_devmap_init();
        devmap_bootstrap(0, NULL);
        vm_max_kernel_address = platform_lastaddr();

        /*
         * Only after the SoC register block is mapped can we perform device
         * tree fixups, as they may attempt to read parameters from hardware.
         */
        OF_interpret("perform-fixup", 0);
        platform_gpio_init();
        cninit();

        debugf("initarm: console initialized\n");
        debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
        debugf(" boothowto = 0x%08x\n", boothowto);
        debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
        debugf(" lastaddr1: 0x%08x\n", lastaddr);
        arm_print_kenv();

        env = kern_getenv("kernelname");
        if (env != NULL) {
                strlcpy(kernelname, env, sizeof(kernelname));
                freeenv(env);
        }

        if (err_devmap != 0)
                printf("WARNING: could not fully configure devmap, error=%d\n",
                    err_devmap);

        platform_late_init();

        /*
         * We must now clean the cache again....
         * Cleaning may be done by reading new data to displace any
         * dirty data in the cache. This will have happened in cpu_setttb(),
         * but since we are bootstrapping, the addresses used for the read
         * may have just been remapped and thus the cache could be out
         * of sync. A re-clean after the switch will cure this.
         * After booting there are no gross relocations of the kernel, so
         * this problem will not occur after initarm().
         */
        /* Set stack for exception handlers */
        undefined_init();
        init_proc0(kernelstack);
        arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
        enable_interrupts(PSR_A);
        pmap_bootstrap(0);

        /*
         * Exclude the kernel (and all the things we allocated which immediately
         * follow the kernel) from the VM allocation pool but not from crash
         * dumps.  virtual_avail is a global variable which tracks the kva we've
         * "allocated" while setting up pmaps.
         *
         * Prepare the list of physical memory available to the vm subsystem.
         */
        arm_physmem_exclude_region(abp->abp_physaddr,
                pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC);
        arm_physmem_init_kernel_globals();

        init_param2(physmem);
        /* Init message buffer. */
        msgbufinit(msgbufp, msgbufsize);
        dbg_monitor_init();
        kdb_init();
        return ((void *)STACKALIGN(thread0.td_pcb));
}

#endif /* __ARM_ARCH < 6 */
#endif /* FDT */




