FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/machdep.c


    1 /*      $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $        */
    2 
    3 /*-
    4  * SPDX-License-Identifier: BSD-4-Clause
    5  *
    6  * Copyright (c) 2004 Olivier Houchard
    7  * Copyright (c) 1994-1998 Mark Brinicombe.
    8  * Copyright (c) 1994 Brini.
    9  * All rights reserved.
   10  *
   11  * This code is derived from software written for Brini by Mark Brinicombe
   12  *
   13  * Redistribution and use in source and binary forms, with or without
   14  * modification, are permitted provided that the following conditions
   15  * are met:
   16  * 1. Redistributions of source code must retain the above copyright
   17  *    notice, this list of conditions and the following disclaimer.
   18  * 2. Redistributions in binary form must reproduce the above copyright
   19  *    notice, this list of conditions and the following disclaimer in the
   20  *    documentation and/or other materials provided with the distribution.
   21  * 3. All advertising materials mentioning features or use of this software
   22  *    must display the following acknowledgement:
   23  *      This product includes software developed by Mark Brinicombe
   24  *      for the NetBSD Project.
   25  * 4. The name of the company nor the name of the author may be used to
   26  *    endorse or promote products derived from this software without specific
   27  *    prior written permission.
   28  *
   29  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
   30  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
   31  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   32  * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
   33  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   34  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   35  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   39  * SUCH DAMAGE.
   40  *
   41  * Machine dependent functions for kernel setup
   42  *
   43  * Created      : 17/09/94
   44  * Updated      : 18/04/01 updated for new wscons
   45  */
   46 
   47 #include "opt_ddb.h"
   48 #include "opt_kstack_pages.h"
   49 #include "opt_platform.h"
   50 #include "opt_sched.h"
   51 #include "opt_timer.h"
   52 
   53 #include <sys/cdefs.h>
   54 __FBSDID("$FreeBSD$");
   55 
   56 #include <sys/param.h>
   57 #include <sys/buf.h>
   58 #include <sys/bus.h>
   59 #include <sys/cons.h>
   60 #include <sys/cpu.h>
   61 #include <sys/devmap.h>
   62 #include <sys/efi.h>
   63 #include <sys/imgact.h>
   64 #include <sys/kdb.h>
   65 #include <sys/kernel.h>
   66 #include <sys/linker.h>
   67 #include <sys/msgbuf.h>
   68 #include <sys/physmem.h>
   69 #include <sys/reboot.h>
   70 #include <sys/rwlock.h>
   71 #include <sys/sched.h>
   72 #include <sys/syscallsubr.h>
   73 #include <sys/sysent.h>
   74 #include <sys/sysproto.h>
   75 #include <sys/vmmeter.h>
   76 
   77 #include <vm/vm_object.h>
   78 #include <vm/vm_page.h>
   79 #include <vm/vm_pager.h>
   80 
   81 #include <machine/asm.h>
   82 #include <machine/debug_monitor.h>
   83 #include <machine/machdep.h>
   84 #include <machine/metadata.h>
   85 #include <machine/pcb.h>
   86 #include <machine/platform.h>
   87 #include <machine/sysarch.h>
   88 #include <machine/undefined.h>
   89 #include <machine/vfp.h>
   90 #include <machine/vmparam.h>
   91 
   92 #ifdef FDT
   93 #include <dev/fdt/fdt_common.h>
   94 #include <machine/ofw_machdep.h>
   95 #endif
   96 
   97 #ifdef DEBUG
   98 #define debugf(fmt, args...) printf(fmt, ##args)
   99 #else
  100 #define debugf(fmt, args...)
  101 #endif
  102 
  103 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
  104     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) || \
  105     defined(COMPAT_FREEBSD9)
  106 #error FreeBSD/arm doesn't provide compatibility with releases prior to 10
  107 #endif
  108 
  109 #if __ARM_ARCH >= 6 && !defined(INTRNG)
  110 #error armv6 requires INTRNG
  111 #endif
  112 
  113 #ifndef _ARM_ARCH_5E
  114 #error FreeBSD requires ARMv5 or later
  115 #endif
  116 
  117 struct pcpu __pcpu[MAXCPU];
  118 struct pcpu *pcpup = &__pcpu[0];
  119 
  120 static struct trapframe proc0_tf;
  121 uint32_t cpu_reset_address = 0;
  122 int cold = 1;
  123 vm_offset_t vector_page;
  124 
  125 /* The address at which the kernel was loaded.  Set early in initarm(). */
  126 vm_paddr_t arm_physmem_kernaddr;
  127 
  128 int (*_arm_memcpy)(void *, void *, int, int) = NULL;
  129 int (*_arm_bzero)(void *, int, int) = NULL;
  130 int _min_memcpy_size = 0;
  131 int _min_bzero_size = 0;
  132 
  133 extern int *end;
  134 
  135 #ifdef FDT
  136 vm_paddr_t pmap_pa;
  137 #if __ARM_ARCH >= 6
  138 vm_offset_t systempage;
  139 vm_offset_t irqstack;
  140 vm_offset_t undstack;
  141 vm_offset_t abtstack;
  142 #else
  143 /*
   144  * This is the number of L2 page tables required to cover the maximum
   145  * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
   146  * stacks, etc.), rounded up to be divisible by 4.
  147  */
  148 #define KERNEL_PT_MAX   78
  149 static struct pv_addr kernel_pt_table[KERNEL_PT_MAX];
  150 struct pv_addr systempage;
  151 static struct pv_addr msgbufpv;
  152 struct pv_addr irqstack;
  153 struct pv_addr undstack;
  154 struct pv_addr abtstack;
  155 static struct pv_addr kernelstack;
  156 #endif /* __ARM_ARCH >= 6 */
  157 #endif /* FDT */
  158 
  159 #ifdef PLATFORM
  160 static delay_func *delay_impl;
  161 static void *delay_arg;
  162 #endif
  163 
  164 struct kva_md_info kmi;
  165 /*
   166  * arm_vector_init:
  167  *
  168  *      Initialize the vector page, and select whether or not to
  169  *      relocate the vectors.
  170  *
   171  *      NOTE: The vector page must already be mapped at its final
   172  *      destination when this is called.
  173  */
  174 
  175 extern unsigned int page0[], page0_data[];
  176 void
  177 arm_vector_init(vm_offset_t va, int which)
  178 {
   179         unsigned int *vectors = (unsigned int *)va;
  180         unsigned int *vectors_data = vectors + (page0_data - page0);
  181         int vec;
  182 
  183         /*
  184          * Loop through the vectors we're taking over, and copy the
  185          * vector's insn and data word.
  186          */
  187         for (vec = 0; vec < ARM_NVEC; vec++) {
  188                 if ((which & (1 << vec)) == 0) {
  189                         /* Don't want to take over this vector. */
  190                         continue;
  191                 }
  192                 vectors[vec] = page0[vec];
  193                 vectors_data[vec] = page0_data[vec];
  194         }
  195 
  196         /* Now sync the vectors. */
  197         icache_sync(va, (ARM_NVEC * 2) * sizeof(u_int));
  198 
  199         vector_page = va;
  200 #if __ARM_ARCH < 6
  201         if (va == ARM_VECTORS_HIGH) {
  202                 /*
  203                  * Enable high vectors in the system control reg (SCTLR).
  204                  *
  205                  * Assume the MD caller knows what it's doing here, and really
  206                  * does want the vector page relocated.
  207                  *
  208                  * Note: This has to be done here (and not just in
  209                  * cpu_setup()) because the vector page needs to be
  210                  * accessible *before* cpu_startup() is called.
  211                  * Think ddb(9) ...
  212                  */
  213                 cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
  214         }
  215 #endif
  216 }
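      /*
       * Illustrative usage (a sketch; the initarm() implementations below
       * do exactly this for the high-vectors case):
       *
       *      arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
       */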
  217 
  218 static void
  219 cpu_startup(void *dummy)
  220 {
  221         struct pcb *pcb = thread0.td_pcb;
  222         const unsigned int mbyte = 1024 * 1024;
  223 #if __ARM_ARCH < 6 && !defined(ARM_CACHE_LOCK_ENABLE)
  224         vm_page_t m;
  225 #endif
  226 
  227         identify_arm_cpu();
  228 
  229         vm_ksubmap_init(&kmi);
  230 
  231         /*
  232          * Display the RAM layout.
  233          */
  234         printf("real memory  = %ju (%ju MB)\n",
  235             (uintmax_t)arm32_ptob(realmem),
  236             (uintmax_t)arm32_ptob(realmem) / mbyte);
  237         printf("avail memory = %ju (%ju MB)\n",
  238             (uintmax_t)arm32_ptob(vm_free_count()),
  239             (uintmax_t)arm32_ptob(vm_free_count()) / mbyte);
  240         if (bootverbose) {
  241                 physmem_print_tables();
  242                 devmap_print_table();
  243         }
  244 
  245         bufinit();
  246         vm_pager_bufferinit();
  247         pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack +
  248             USPACE_SVC_STACK_TOP;
  249         pmap_set_pcb_pagedir(kernel_pmap, pcb);
  250 #if __ARM_ARCH < 6
  251         vector_page_setprot(VM_PROT_READ);
  252         pmap_postinit();
  253 #ifdef ARM_CACHE_LOCK_ENABLE
  254         pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
  255         arm_lock_cache_line(ARM_TP_ADDRESS);
  256 #else
  257         m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
  258         pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m));
  259 #endif
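              /* Initialize the restartable atomic sequence (RAS) words. */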
  260         *(uint32_t *)ARM_RAS_START = 0;
  261         *(uint32_t *)ARM_RAS_END = 0xffffffff;
  262 #endif
  263 }
  264 
  265 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
  266 
  267 /*
  268  * Flush the D-cache for non-DMA I/O so that the I-cache can
  269  * be made coherent later.
  270  */
  271 void
  272 cpu_flush_dcache(void *ptr, size_t len)
  273 {
  274 
  275         dcache_wb_poc((vm_offset_t)ptr, (vm_paddr_t)vtophys(ptr), len);
  276 }
  277 
  278 /* Get current clock frequency for the given cpu id. */
  279 int
  280 cpu_est_clockrate(int cpu_id, uint64_t *rate)
  281 {
  282 #if __ARM_ARCH >= 6
  283         struct pcpu *pc;
  284 
  285         pc = pcpu_find(cpu_id);
  286         if (pc == NULL || rate == NULL)
  287                 return (EINVAL);
  288 
  289         if (pc->pc_clock == 0)
  290                 return (EOPNOTSUPP);
  291 
  292         *rate = pc->pc_clock;
  293 
  294         return (0);
  295 #else
  296         return (ENXIO);
  297 #endif
  298 }
  299 
  300 void
  301 cpu_idle(int busy)
  302 {
  303 
  304         CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu);
  305         spinlock_enter();
  306 #ifndef NO_EVENTTIMERS
  307         if (!busy)
  308                 cpu_idleclock();
  309 #endif
  310         if (!sched_runnable())
  311                 cpu_sleep(0);
  312 #ifndef NO_EVENTTIMERS
  313         if (!busy)
  314                 cpu_activeclock();
  315 #endif
  316         spinlock_exit();
  317         CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu);
  318 }
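      /*
       * Note on the protocol above: when event timers are in use, a CPU
       * going idle without pending work switches its local ticks to a
       * skippable one-shot mode via cpu_idleclock(), and restores normal
       * tick accounting with cpu_activeclock() on wakeup; cpu_sleep(0) is
       * the low-power wait itself.
       */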
  319 
  320 int
  321 cpu_idle_wakeup(int cpu)
  322 {
  323 
  324         return (0);
  325 }
  326 
  327 #ifdef NO_EVENTTIMERS
  328 /*
  329  * Most ARM platforms don't need to do anything special to init their clocks
   330  * (they get initialized during normal device attachment), and by not
   331  * defining a cpu_initclocks() function they get this generic one.  Any
   332  * platform that needs to do something special can provide its own
   333  * implementation, which will override this one due to the weak linkage.
  334  */
  335 void
  336 arm_generic_initclocks(void)
  337 {
  338 }
  339 __weak_reference(arm_generic_initclocks, cpu_initclocks);
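      /*
       * Sketch of an override (hypothetical platform code, not part of
       * this file): any strong definition wins over the weak reference
       * above, e.g.
       *
       *      void
       *      cpu_initclocks(void)
       *      {
       *              my_platform_clock_init();
       *      }
       */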
  340 
  341 #else
  342 void
  343 cpu_initclocks(void)
  344 {
  345 
  346 #ifdef SMP
  347         if (PCPU_GET(cpuid) == 0)
  348                 cpu_initclocks_bsp();
  349         else
  350                 cpu_initclocks_ap();
  351 #else
  352         cpu_initclocks_bsp();
  353 #endif
  354 }
  355 #endif
  356 
  357 #ifdef PLATFORM
  358 void
  359 arm_set_delay(delay_func *impl, void *arg)
  360 {
  361 
  362         KASSERT(impl != NULL, ("No DELAY implementation"));
  363         delay_impl = impl;
  364         delay_arg = arg;
  365 }
  366 
  367 void
  368 DELAY(int usec)
  369 {
  370 
  371         TSENTER();
  372         delay_impl(usec, delay_arg);
  373         TSEXIT();
  374 }
  375 #endif
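      /*
       * Illustrative sketch (not part of this file): a platform's timer
       * driver registers its delay routine during attach, after which
       * DELAY() above dispatches through it.  The driver names here are
       * hypothetical.
       *
       *      static void
       *      my_timer_delay(int usec, void *arg)
       *      {
       *              struct my_timer_softc *sc = arg;
       *              ... spin on the timer counter for usec microseconds ...
       *      }
       *
       *      arm_set_delay(my_timer_delay, sc);
       */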
  376 
  377 void
  378 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
  379 {
  380 
  381 #if __ARM_ARCH >= 6
  382         pcpu->pc_mpidr = 0xffffffff;
  383 #endif
  384 }
  385 
  386 void
  387 spinlock_enter(void)
  388 {
  389         struct thread *td;
  390         register_t cspr;
  391 
  392         td = curthread;
  393         if (td->td_md.md_spinlock_count == 0) {
  394                 cspr = disable_interrupts(PSR_I | PSR_F);
  395                 td->td_md.md_spinlock_count = 1;
  396                 td->td_md.md_saved_cspr = cspr;
  397         } else
  398                 td->td_md.md_spinlock_count++;
  399         critical_enter();
  400 }
  401 
  402 void
  403 spinlock_exit(void)
  404 {
  405         struct thread *td;
  406         register_t cspr;
  407 
  408         td = curthread;
  409         critical_exit();
  410         cspr = td->td_md.md_saved_cspr;
  411         td->td_md.md_spinlock_count--;
  412         if (td->td_md.md_spinlock_count == 0)
  413                 restore_interrupts(cspr);
  414 }
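      /*
       * Sketch of the expected pairing (the standard spinlock_enter()/
       * spinlock_exit() contract):
       *
       *      spinlock_enter();       IRQ and FIQ masked; PSR saved on the
       *                              first (outermost) entry
       *      ... access per-CPU or spin-lock protected state ...
       *      spinlock_exit();        PSR restored once the nesting count
       *                              returns to zero
       */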
  415 
  416 /*
  417  * Clear registers on exec
  418  */
  419 void
  420 exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
  421 {
  422         struct trapframe *tf = td->td_frame;
  423 
  424         memset(tf, 0, sizeof(*tf));
  425         tf->tf_usr_sp = stack;
  426         tf->tf_usr_lr = imgp->entry_addr;
  427         tf->tf_svc_lr = 0x77777777;
  428         tf->tf_pc = imgp->entry_addr;
  429         tf->tf_spsr = PSR_USR32_MODE;
  430         if ((register_t)imgp->entry_addr & 1)
  431                 tf->tf_spsr |= PSR_T;
  432 }
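      /*
       * Example: an entry point with bit 0 set (e.g. 0x8001) denotes a
       * Thumb entry by the usual ARM interworking convention, so PSR_T is
       * set above and the first user instruction executes in Thumb state.
       */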
  433 
  434 
  435 #ifdef VFP
  436 /*
  437  * Get machine VFP context.
  438  */
  439 void
  440 get_vfpcontext(struct thread *td, mcontext_vfp_t *vfp)
  441 {
  442         struct pcb *pcb;
  443 
  444         pcb = td->td_pcb;
  445         if (td == curthread) {
  446                 critical_enter();
  447                 vfp_store(&pcb->pcb_vfpstate, false);
  448                 critical_exit();
  449         } else
  450                 MPASS(TD_IS_SUSPENDED(td));
  451         memcpy(vfp->mcv_reg, pcb->pcb_vfpstate.reg,
  452             sizeof(vfp->mcv_reg));
  453         vfp->mcv_fpscr = pcb->pcb_vfpstate.fpscr;
  454 }
  455 
  456 /*
  457  * Set machine VFP context.
  458  */
  459 void
  460 set_vfpcontext(struct thread *td, mcontext_vfp_t *vfp)
  461 {
  462         struct pcb *pcb;
  463 
  464         pcb = td->td_pcb;
  465         if (td == curthread) {
  466                 critical_enter();
  467                 vfp_discard(td);
  468                 critical_exit();
  469         } else
  470                 MPASS(TD_IS_SUSPENDED(td));
  471         memcpy(pcb->pcb_vfpstate.reg, vfp->mcv_reg,
  472             sizeof(pcb->pcb_vfpstate.reg));
  473         pcb->pcb_vfpstate.fpscr = vfp->mcv_fpscr;
  474 }
  475 #endif
  476 
  477 int
  478 arm_get_vfpstate(struct thread *td, void *args)
  479 {
  480         int rv;
  481         struct arm_get_vfpstate_args ua;
  482         mcontext_vfp_t  mcontext_vfp;
  483 
  484         rv = copyin(args, &ua, sizeof(ua));
  485         if (rv != 0)
  486                 return (rv);
  487         if (ua.mc_vfp_size != sizeof(mcontext_vfp_t))
  488                 return (EINVAL);
  489 #ifdef VFP
  490         get_vfpcontext(td, &mcontext_vfp);
  491 #else
  492         bzero(&mcontext_vfp, sizeof(mcontext_vfp));
  493 #endif
  494 
  495         rv = copyout(&mcontext_vfp, ua.mc_vfp,  sizeof(mcontext_vfp));
  496         if (rv != 0)
  497                 return (rv);
  498         return (0);
  499 }
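      /*
       * Illustrative sketch of the userland side (an assumption, not shown
       * in this file): this handler is reached through sysarch(2), with
       * the caller filling in the args struct.
       *
       *      mcontext_vfp_t vfp;
       *      struct arm_get_vfpstate_args ua = {
       *              .mc_vfp_size = sizeof(vfp),
       *              .mc_vfp = &vfp,
       *      };
       *      sysarch(ARM_GET_VFPSTATE, &ua);
       */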
  500 
  501 /*
  502  * Get machine context.
  503  */
  504 int
  505 get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
  506 {
  507         struct trapframe *tf = td->td_frame;
  508         __greg_t *gr = mcp->__gregs;
  509 
  510         if (clear_ret & GET_MC_CLEAR_RET) {
  511                 gr[_REG_R0] = 0;
  512                 gr[_REG_CPSR] = tf->tf_spsr & ~PSR_C;
  513         } else {
  514                 gr[_REG_R0]   = tf->tf_r0;
  515                 gr[_REG_CPSR] = tf->tf_spsr;
  516         }
  517         gr[_REG_R1]   = tf->tf_r1;
  518         gr[_REG_R2]   = tf->tf_r2;
  519         gr[_REG_R3]   = tf->tf_r3;
  520         gr[_REG_R4]   = tf->tf_r4;
  521         gr[_REG_R5]   = tf->tf_r5;
  522         gr[_REG_R6]   = tf->tf_r6;
  523         gr[_REG_R7]   = tf->tf_r7;
  524         gr[_REG_R8]   = tf->tf_r8;
  525         gr[_REG_R9]   = tf->tf_r9;
  526         gr[_REG_R10]  = tf->tf_r10;
  527         gr[_REG_R11]  = tf->tf_r11;
  528         gr[_REG_R12]  = tf->tf_r12;
  529         gr[_REG_SP]   = tf->tf_usr_sp;
  530         gr[_REG_LR]   = tf->tf_usr_lr;
  531         gr[_REG_PC]   = tf->tf_pc;
  532 
  533         mcp->mc_vfp_size = 0;
  534         mcp->mc_vfp_ptr = NULL;
  535         memset(&mcp->mc_spare, 0, sizeof(mcp->mc_spare));
  536 
  537         return (0);
  538 }
  539 
  540 /*
  541  * Set machine context.
  542  *
   543  * Only user-modifiable state is accepted; the checks below reject any
   544  * attempt to change the processor mode or to mask interrupts.
  545  */
  546 int
  547 set_mcontext(struct thread *td, mcontext_t *mcp)
  548 {
  549         mcontext_vfp_t mc_vfp, *vfp;
  550         struct trapframe *tf = td->td_frame;
  551         const __greg_t *gr = mcp->__gregs;
  552         int spsr;
  553 
  554         /*
  555          * Make sure the processor mode has not been tampered with and
  556          * interrupts have not been disabled.
  557          */
  558         spsr = gr[_REG_CPSR];
  559         if ((spsr & PSR_MODE) != PSR_USR32_MODE ||
  560             (spsr & (PSR_I | PSR_F)) != 0)
  561                 return (EINVAL);
  562 
  563 #ifdef WITNESS
  564         if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_size != sizeof(mc_vfp)) {
  565                 printf("%s: %s: Malformed mc_vfp_size: %d (0x%08X)\n",
  566                     td->td_proc->p_comm, __func__,
  567                     mcp->mc_vfp_size, mcp->mc_vfp_size);
  568         } else if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_ptr == NULL) {
  569                 printf("%s: %s: c_vfp_size != 0 but mc_vfp_ptr == NULL\n",
  570                     td->td_proc->p_comm, __func__);
  571         }
  572 #endif
  573 
  574         if (mcp->mc_vfp_size == sizeof(mc_vfp) && mcp->mc_vfp_ptr != NULL) {
  575                 if (copyin(mcp->mc_vfp_ptr, &mc_vfp, sizeof(mc_vfp)) != 0)
  576                         return (EFAULT);
  577                 vfp = &mc_vfp;
  578         } else {
  579                 vfp = NULL;
  580         }
  581 
  582         tf->tf_r0 = gr[_REG_R0];
  583         tf->tf_r1 = gr[_REG_R1];
  584         tf->tf_r2 = gr[_REG_R2];
  585         tf->tf_r3 = gr[_REG_R3];
  586         tf->tf_r4 = gr[_REG_R4];
  587         tf->tf_r5 = gr[_REG_R5];
  588         tf->tf_r6 = gr[_REG_R6];
  589         tf->tf_r7 = gr[_REG_R7];
  590         tf->tf_r8 = gr[_REG_R8];
  591         tf->tf_r9 = gr[_REG_R9];
  592         tf->tf_r10 = gr[_REG_R10];
  593         tf->tf_r11 = gr[_REG_R11];
  594         tf->tf_r12 = gr[_REG_R12];
  595         tf->tf_usr_sp = gr[_REG_SP];
  596         tf->tf_usr_lr = gr[_REG_LR];
  597         tf->tf_pc = gr[_REG_PC];
  598         tf->tf_spsr = gr[_REG_CPSR];
  599 #ifdef VFP
  600         if (vfp != NULL)
  601                 set_vfpcontext(td, vfp);
  602 #endif
  603         return (0);
  604 }
  605 
  606 void
   607 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
   608 {
  612         struct thread *td;
  613         struct proc *p;
  614         struct trapframe *tf;
  615         struct sigframe *fp, frame;
  616         struct sigacts *psp;
  617         struct sysentvec *sysent;
  618         int onstack;
  619         int sig;
  620         int code;
  621 
  622         td = curthread;
  623         p = td->td_proc;
  624         PROC_LOCK_ASSERT(p, MA_OWNED);
  625         sig = ksi->ksi_signo;
  626         code = ksi->ksi_code;
  627         psp = p->p_sigacts;
  628         mtx_assert(&psp->ps_mtx, MA_OWNED);
  629         tf = td->td_frame;
  630         onstack = sigonstack(tf->tf_usr_sp);
  631 
  632         CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
  633             catcher, sig);
  634 
  635         /* Allocate and validate space for the signal handler context. */
  636         if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) &&
  637             SIGISMEMBER(psp->ps_sigonstack, sig)) {
  638                 fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
  639                     td->td_sigstk.ss_size);
  640 #if defined(COMPAT_43)
  641                 td->td_sigstk.ss_flags |= SS_ONSTACK;
  642 #endif
  643         } else
  644                 fp = (struct sigframe *)td->td_frame->tf_usr_sp;
  645 
  646         /* make room on the stack */
  647         fp--;
  648 
  649         /* make the stack aligned */
  650         fp = (struct sigframe *)STACKALIGN(fp);
  651         /* Populate the siginfo frame. */
  652         bzero(&frame, sizeof(frame));
  653         get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
  654 #ifdef VFP
  655         get_vfpcontext(td, &frame.sf_vfp);
  656         frame.sf_uc.uc_mcontext.mc_vfp_size = sizeof(fp->sf_vfp);
  657         frame.sf_uc.uc_mcontext.mc_vfp_ptr = &fp->sf_vfp;
  658 #else
  659         frame.sf_uc.uc_mcontext.mc_vfp_size = 0;
  660         frame.sf_uc.uc_mcontext.mc_vfp_ptr = NULL;
  661 #endif
  662         frame.sf_si = ksi->ksi_info;
  663         frame.sf_uc.uc_sigmask = *mask;
   664         frame.sf_uc.uc_stack = td->td_sigstk;
   665         frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
   666             ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
  667         mtx_unlock(&psp->ps_mtx);
  668         PROC_UNLOCK(td->td_proc);
  669 
  670         /* Copy the sigframe out to the user's stack. */
  671         if (copyout(&frame, fp, sizeof(*fp)) != 0) {
  672                 /* Process has trashed its stack. Kill it. */
  673                 CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
  674                 PROC_LOCK(p);
  675                 sigexit(td, SIGILL);
  676         }
  677 
  678         /*
  679          * Build context to run handler in.  We invoke the handler
  680          * directly, only returning via the trampoline.  Note the
  681          * trampoline version numbers are coordinated with machine-
  682          * dependent code in libc.
  683          */
  684 
  685         tf->tf_r0 = sig;
  686         tf->tf_r1 = (register_t)&fp->sf_si;
  687         tf->tf_r2 = (register_t)&fp->sf_uc;
  688 
  689         /* the trampoline uses r5 as the uc address */
  690         tf->tf_r5 = (register_t)&fp->sf_uc;
  691         tf->tf_pc = (register_t)catcher;
  692         tf->tf_usr_sp = (register_t)fp;
  693         sysent = p->p_sysent;
  694         if (sysent->sv_sigcode_base != 0)
  695                 tf->tf_usr_lr = (register_t)sysent->sv_sigcode_base;
  696         else
  697                 tf->tf_usr_lr = (register_t)(sysent->sv_psstrings -
  698                     *(sysent->sv_szsigcode));
  699         /* Set the mode to enter in the signal handler */
  700 #if __ARM_ARCH >= 7
  701         if ((register_t)catcher & 1)
  702                 tf->tf_spsr |= PSR_T;
  703         else
  704                 tf->tf_spsr &= ~PSR_T;
  705 #endif
  706 
  707         CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr,
  708             tf->tf_usr_sp);
  709 
  710         PROC_LOCK(p);
  711         mtx_lock(&psp->ps_mtx);
  712 }
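      /*
       * Sketch of the resulting user-mode call, following the register
       * setup above:
       *
       *      handler(sig, &fp->sf_si, &fp->sf_uc);   r0, r1, r2
       *
       * The handler returns into the signal trampoline via lr; the
       * trampoline then issues sigreturn(2) using the ucontext address
       * preserved in r5.
       */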
  713 
  714 int
   715 sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
   716 {
  721         ucontext_t uc;
  722         int error;
  723 
  724         if (uap == NULL)
  725                 return (EFAULT);
  726         if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
  727                 return (EFAULT);
  728         /* Restore register context. */
  729         error = set_mcontext(td, &uc.uc_mcontext);
  730         if (error != 0)
  731                 return (error);
  732 
  733         /* Restore signal mask. */
  734         kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
  735 
  736         return (EJUSTRETURN);
  737 }
  738 
  739 /*
  740  * Construct a PCB from a trapframe. This is called from kdb_trap() where
  741  * we want to start a backtrace from the function that caused us to enter
  742  * the debugger. We have the context in the trapframe, but base the trace
  743  * on the PCB. The PCB doesn't have to be perfect, as long as it contains
  744  * enough for a backtrace.
  745  */
  746 void
  747 makectx(struct trapframe *tf, struct pcb *pcb)
  748 {
  749         pcb->pcb_regs.sf_r4 = tf->tf_r4;
  750         pcb->pcb_regs.sf_r5 = tf->tf_r5;
  751         pcb->pcb_regs.sf_r6 = tf->tf_r6;
  752         pcb->pcb_regs.sf_r7 = tf->tf_r7;
  753         pcb->pcb_regs.sf_r8 = tf->tf_r8;
  754         pcb->pcb_regs.sf_r9 = tf->tf_r9;
  755         pcb->pcb_regs.sf_r10 = tf->tf_r10;
  756         pcb->pcb_regs.sf_r11 = tf->tf_r11;
  757         pcb->pcb_regs.sf_r12 = tf->tf_r12;
  758         pcb->pcb_regs.sf_pc = tf->tf_pc;
  759         pcb->pcb_regs.sf_lr = tf->tf_usr_lr;
  760         pcb->pcb_regs.sf_sp = tf->tf_usr_sp;
  761 }
  762 
  763 void
  764 pcpu0_init(void)
  765 {
  766 #if __ARM_ARCH >= 6
  767         set_curthread(&thread0);
  768 #endif
  769         pcpu_init(pcpup, 0, sizeof(struct pcpu));
  770 #if __ARM_ARCH >= 6
  771         pcpup->pc_mpidr = cp15_mpidr_get() & 0xFFFFFF;
  772 #endif
  773         PCPU_SET(curthread, &thread0);
  774 }
  775 
  776 /*
  777  * Initialize proc0
  778  */
  779 void
  780 init_proc0(vm_offset_t kstack)
  781 {
  782         proc_linkup0(&proc0, &thread0);
  783         thread0.td_kstack = kstack;
  784         thread0.td_kstack_pages = kstack_pages;
  785         thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
  786             thread0.td_kstack_pages * PAGE_SIZE) - 1;
  787         thread0.td_pcb->pcb_flags = 0;
  788         thread0.td_pcb->pcb_vfpcpu = -1;
  789         thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN;
  790         thread0.td_frame = &proc0_tf;
  791         pcpup->pc_curpcb = thread0.td_pcb;
  792 }
  793 
  794 #if __ARM_ARCH >= 6
  795 void
  796 set_stackptrs(int cpu)
  797 {
  798 
  799         set_stackptr(PSR_IRQ32_MODE,
  800             irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
  801         set_stackptr(PSR_ABT32_MODE,
  802             abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
  803         set_stackptr(PSR_UND32_MODE,
  804             undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
  805 }
  806 #else
  807 void
  808 set_stackptrs(int cpu)
  809 {
  810 
  811         set_stackptr(PSR_IRQ32_MODE,
  812             irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
  813         set_stackptr(PSR_ABT32_MODE,
  814             abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
  815         set_stackptr(PSR_UND32_MODE,
  816             undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
  817 }
  818 #endif
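      /*
       * Worked example (assuming IRQ_STACK_SIZE == 1): CPU 0's IRQ stack
       * occupies [irqstack, irqstack + PAGE_SIZE).  ARM uses full-
       * descending stacks, so the banked r13 must start at the top of
       * that range, which is what the "(cpu + 1)" factor above computes.
       */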
  819 
  820 static void
  821 arm_kdb_init(void)
  822 {
  823 
  824         kdb_init();
  825 #ifdef KDB
  826         if (boothowto & RB_KDB)
  827                 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
  828 #endif
  829 }
  830 
  831 #ifdef FDT
  832 #if __ARM_ARCH < 6
  833 void *
  834 initarm(struct arm_boot_params *abp)
  835 {
  836         struct mem_region mem_regions[FDT_MEM_REGIONS];
  837         struct pv_addr kernel_l1pt;
  838         struct pv_addr dpcpu;
  839         vm_offset_t dtbp, freemempos, l2_start, lastaddr;
  840         uint64_t memsize;
  841         uint32_t l2size;
  842         char *env;
  843         void *kmdp;
  844         u_int l1pagetable;
  845         int i, j, err_devmap, mem_regions_sz;
  846 
  847         lastaddr = parse_boot_param(abp);
  848         arm_physmem_kernaddr = abp->abp_physaddr;
  849 
  850         memsize = 0;
  851 
  852         cpuinfo_init();
  853         set_cpufuncs();
  854 
  855         /*
  856          * Find the dtb passed in by the boot loader.
  857          */
  858         kmdp = preload_search_by_type("elf kernel");
  859         if (kmdp != NULL)
  860                 dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
  861         else
  862                 dtbp = (vm_offset_t)NULL;
  863 
  864 #if defined(FDT_DTB_STATIC)
  865         /*
   866          * If the device tree blob was not retrieved from the metadata, try
  867          * to use the statically embedded one.
  868          */
  869         if (dtbp == (vm_offset_t)NULL)
  870                 dtbp = (vm_offset_t)&fdt_static_dtb;
  871 #endif
  872 
  873         if (OF_install(OFW_FDT, 0) == FALSE)
  874                 panic("Cannot install FDT");
  875 
  876         if (OF_init((void *)dtbp) != 0)
  877                 panic("OF_init failed with the found device tree");
  878 
  879         /* Grab physical memory regions information from device tree. */
  880         if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0)
  881                 panic("Cannot get physical memory regions");
  882         physmem_hardware_regions(mem_regions, mem_regions_sz);
  883 
  884         /* Grab reserved memory regions information from device tree. */
  885         if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
  886                 physmem_exclude_regions(mem_regions, mem_regions_sz,
  887                     EXFLAG_NODUMP | EXFLAG_NOALLOC);
  888 
  889         /* Platform-specific initialisation */
  890         platform_probe_and_attach();
  891 
  892         pcpu0_init();
  893 
  894         /* Do basic tuning, hz etc */
  895         init_param1();
  896 
  897         /* Calculate number of L2 tables needed for mapping vm_page_array */
  898         l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
  899         l2size = (l2size >> L1_S_SHIFT) + 1;
  900 
  901         /*
   902          * Add one table for the end of the kernel map, one for the stacks,
   903          * msgbuf, and L1/L2 table mappings, one for the vectors map, and
   904          * two for the L2 structures allocated by pmap_bootstrap.
  905          */
  906         l2size += 5;
  907 
  908         /* Make it divisible by 4 */
  909         l2size = (l2size + 3) & ~3;
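              /*
               * Worked example (assuming a 64-byte struct vm_page and
               * 512MB of RAM): vm_page_array needs (512MB / 4KB) * 64 =
               * 8MB of mappings, i.e. 8 + 1 = 9 L2 tables; the 5 fixed
               * tables above bring that to 14, rounded up to 16.
               */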
  910 
  911         freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;
  912 
  913         /* Define a macro to simplify memory allocation */
  914 #define valloc_pages(var, np)                                           \
  915         alloc_pages((var).pv_va, (np));                                 \
  916         (var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR);
  917 
  918 #define alloc_pages(var, np)                                            \
  919         (var) = freemempos;                                             \
   920         freemempos += ((np) * PAGE_SIZE);                               \
  921         memset((char *)(var), 0, ((np) * PAGE_SIZE));
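              /*
               * Example: valloc_pages(systempage, 1) carves one zeroed
               * page out of freemempos, records its VA in
               * systempage.pv_va, and derives the PA by applying the
               * kernel's physical load offset.
               */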
  922 
  923         while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
  924                 freemempos += PAGE_SIZE;
  925         valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
  926 
  927         for (i = 0, j = 0; i < l2size; ++i) {
  928                 if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
  929                         valloc_pages(kernel_pt_table[i],
  930                             L2_TABLE_SIZE / PAGE_SIZE);
  931                         j = i;
  932                 } else {
  933                         kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
  934                             L2_TABLE_SIZE_REAL * (i - j);
  935                         kernel_pt_table[i].pv_pa =
  936                             kernel_pt_table[i].pv_va - KERNVIRTADDR +
  937                             abp->abp_physaddr;
  938 
  939                 }
  940         }
  941         /*
  942          * Allocate a page for the system page mapped to 0x00000000
  943          * or 0xffff0000. This page will just contain the system vectors
  944          * and can be shared by all processes.
  945          */
  946         valloc_pages(systempage, 1);
  947 
  948         /* Allocate dynamic per-cpu area. */
  949         valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
  950         dpcpu_init((void *)dpcpu.pv_va, 0);
  951 
  952         /* Allocate stacks for all modes */
  953         valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
  954         valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
  955         valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
  956         valloc_pages(kernelstack, kstack_pages);
  957         valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
  958 
  959         /*
  960          * Now we start construction of the L1 page table
  961          * We start by mapping the L2 page tables into the L1.
  962          * This means that we can replace L1 mappings later on if necessary
  963          */
  964         l1pagetable = kernel_l1pt.pv_va;
  965 
  966         /*
  967          * Try to map as much as possible of kernel text and data using
  968          * 1MB section mapping and for the rest of initial kernel address
  969          * space use L2 coarse tables.
  970          *
  971          * Link L2 tables for mapping remainder of kernel (modulo 1MB)
  972          * and kernel structures
  973          */
  974         l2_start = lastaddr & ~(L1_S_OFFSET);
  975         for (i = 0 ; i < l2size - 1; i++)
  976                 pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
  977                     &kernel_pt_table[i]);
  978 
  979         pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;
  980 
  981         /* Map kernel code and data */
  982         pmap_map_chunk(l1pagetable, KERNVIRTADDR, abp->abp_physaddr,
  983            (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
  984             VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
  985 
  986         /* Map L1 directory and allocated L2 page tables */
  987         pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
  988             L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
  989 
  990         pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
  991             kernel_pt_table[0].pv_pa,
  992             L2_TABLE_SIZE_REAL * l2size,
  993             VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
  994 
  995         /* Map allocated DPCPU, stacks and msgbuf */
  996         pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
  997             freemempos - dpcpu.pv_va,
  998             VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
  999 
 1000         /* Link and map the vector page */
 1001         pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
 1002             &kernel_pt_table[l2size - 1]);
 1003         pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
 1004             VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);
 1005 
 1006         /* Establish static device mappings. */
 1007         err_devmap = platform_devmap_init();
 1008         devmap_bootstrap(l1pagetable, NULL);
 1009         vm_max_kernel_address = platform_lastaddr();
 1010 
 1011         cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT);
 1012         pmap_pa = kernel_l1pt.pv_pa;
 1013         cpu_setttb(kernel_l1pt.pv_pa);
 1014         cpu_tlb_flushID();
 1015         cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));
 1016 
 1017         /*
 1018          * Now that proper page tables are installed, call cpu_setup() to enable
 1019          * instruction and data caches and other chip-specific features.
 1020          */
 1021         cpu_setup();
 1022 
 1023         /*
  1024          * Only after the SoC register block is mapped can we perform device
 1025          * tree fixups, as they may attempt to read parameters from hardware.
 1026          */
 1027         OF_interpret("perform-fixup", 0);
 1028 
 1029         platform_gpio_init();
 1030 
 1031         cninit();
 1032 
 1033         debugf("initarm: console initialized\n");
 1034         debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
 1035         debugf(" boothowto = 0x%08x\n", boothowto);
 1036         debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
 1037         arm_print_kenv();
 1038 
 1039         env = kern_getenv("kernelname");
 1040         if (env != NULL) {
 1041                 strlcpy(kernelname, env, sizeof(kernelname));
 1042                 freeenv(env);
 1043         }
 1044 
 1045         if (err_devmap != 0)
 1046                 printf("WARNING: could not fully configure devmap, error=%d\n",
 1047                     err_devmap);
 1048 
 1049         platform_late_init();
 1050 
 1051         /*
 1052          * Pages were allocated during the secondary bootstrap for the
 1053          * stacks for different CPU modes.
 1054          * We must now set the r13 registers in the different CPU modes to
 1055          * point to these stacks.
 1056          * Since the ARM stacks use STMFD etc. we must set r13 to the top end
 1057          * of the stack memory.
 1058          */
 1059         cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);
 1060 
 1061         set_stackptrs(0);
 1062 
 1063         /*
 1064          * We must now clean the cache again....
 1065          * Cleaning may be done by reading new data to displace any
 1066          * dirty data in the cache. This will have happened in cpu_setttb()
 1067          * but since we are boot strapping the addresses used for the read
 1068          * may have just been remapped and thus the cache could be out
 1069          * of sync. A re-clean after the switch will cure this.
 1070          * After booting there are no gross relocations of the kernel thus
 1071          * this problem will not occur after initarm().
 1072          */
 1073         cpu_idcache_wbinv_all();
 1074 
 1075         undefined_init();
 1076 
 1077         init_proc0(kernelstack.pv_va);
 1078 
 1079         arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
 1080         pmap_bootstrap(freemempos, &kernel_l1pt);
 1081         msgbufp = (void *)msgbufpv.pv_va;
 1082         msgbufinit(msgbufp, msgbufsize);
 1083         mutex_init();
 1084 
 1085         /*
 1086          * Exclude the kernel (and all the things we allocated which immediately
 1087          * follow the kernel) from the VM allocation pool but not from crash
 1088          * dumps.  virtual_avail is a global variable which tracks the kva we've
 1089          * "allocated" while setting up pmaps.
 1090          *
 1091          * Prepare the list of physical memory available to the vm subsystem.
 1092          */
 1093         physmem_exclude_region(abp->abp_physaddr,
 1094             (virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC);
 1095         physmem_init_kernel_globals();
 1096 
 1097         init_param2(physmem);
 1098         dbg_monitor_init();
 1099         arm_kdb_init();
 1100 
 1101         return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
 1102             sizeof(struct pcb)));
 1103 }
 1104 #else /* __ARM_ARCH < 6 */
 1105 void *
 1106 initarm(struct arm_boot_params *abp)
 1107 {
 1108         struct mem_region mem_regions[FDT_MEM_REGIONS];
 1109         vm_paddr_t lastaddr;
 1110         vm_offset_t dtbp, kernelstack, dpcpu;
 1111         char *env;
 1112         void *kmdp;
 1113         int err_devmap, mem_regions_sz;
 1114 #ifdef EFI
 1115         struct efi_map_header *efihdr;
 1116 #endif
 1117 
 1118         /* get last allocated physical address */
 1119         arm_physmem_kernaddr = abp->abp_physaddr;
 1120         lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr;
 1121 
 1122         set_cpufuncs();
 1123         cpuinfo_init();
 1124 
 1125         /*
 1126          * Find the dtb passed in by the boot loader.
 1127          */
 1128         kmdp = preload_search_by_type("elf kernel");
 1129         dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
 1130 #if defined(FDT_DTB_STATIC)
 1131         /*
  1132          * If the device tree blob was not retrieved from the metadata, try
 1133          * to use the statically embedded one.
 1134          */
 1135         if (dtbp == (vm_offset_t)NULL)
 1136                 dtbp = (vm_offset_t)&fdt_static_dtb;
 1137 #endif
 1138 
 1139         if (OF_install(OFW_FDT, 0) == FALSE)
 1140                 panic("Cannot install FDT");
 1141 
 1142         if (OF_init((void *)dtbp) != 0)
 1143                 panic("OF_init failed with the found device tree");
 1144 
 1145 #if defined(LINUX_BOOT_ABI)
 1146         arm_parse_fdt_bootargs();
 1147 #endif
 1148 
 1149 #ifdef EFI
 1150         efihdr = (struct efi_map_header *)preload_search_info(kmdp,
 1151             MODINFO_METADATA | MODINFOMD_EFI_MAP);
 1152         if (efihdr != NULL) {
 1153                 arm_add_efi_map_entries(efihdr, mem_regions, &mem_regions_sz);
 1154         } else
 1155 #endif
 1156         {
 1157                 /* Grab physical memory regions information from device tree. */
  1158                 if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, NULL) != 0)
 1159                         panic("Cannot get physical memory regions");
 1160         }
 1161         physmem_hardware_regions(mem_regions, mem_regions_sz);
 1162 
 1163         /* Grab reserved memory regions information from device tree. */
 1164         if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
 1165                 physmem_exclude_regions(mem_regions, mem_regions_sz,
 1166                     EXFLAG_NODUMP | EXFLAG_NOALLOC);
 1167 
 1168         /*
 1169          * Set TEX remapping registers.
 1170          * Setup kernel page tables and switch to kernel L1 page table.
 1171          */
 1172         pmap_set_tex();
 1173         pmap_bootstrap_prepare(lastaddr);
 1174 
 1175         /*
 1176          * If EARLY_PRINTF support is enabled, we need to re-establish the
 1177          * mapping after pmap_bootstrap_prepare() switches to new page tables.
 1178          * Note that we can only do the remapping if the VA is outside the
 1179          * kernel, now that we have real virtual (not VA=PA) mappings in effect.
 1180          * Early printf does not work between the time pmap_set_tex() does
 1181          * cp15_prrr_set() and this code remaps the VA.
 1182          */
 1183 #if defined(EARLY_PRINTF) && defined(SOCDEV_PA) && defined(SOCDEV_VA) && SOCDEV_VA < KERNBASE
 1184         pmap_preboot_map_attr(SOCDEV_PA, SOCDEV_VA, 1024 * 1024, 
 1185             VM_PROT_READ | VM_PROT_WRITE, VM_MEMATTR_DEVICE);
 1186 #endif
 1187 
 1188         /*
 1189          * Now that proper page tables are installed, call cpu_setup() to enable
 1190          * instruction and data caches and other chip-specific features.
 1191          */
 1192         cpu_setup();
 1193 
 1194         /* Platform-specific initialisation */
 1195         platform_probe_and_attach();
 1196         pcpu0_init();
 1197 
 1198         /* Do basic tuning, hz etc */
 1199         init_param1();
 1200 
 1201         /*
 1202          * Allocate a page for the system page mapped to 0xffff0000
 1203          * This page will just contain the system vectors and can be
 1204          * shared by all processes.
 1205          */
 1206         systempage = pmap_preboot_get_pages(1);
 1207 
 1208         /* Map the vector page. */
 1209         pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH,  1);
 1210         if (virtual_end >= ARM_VECTORS_HIGH)
 1211                 virtual_end = ARM_VECTORS_HIGH - 1;
 1212 
 1213         /* Allocate dynamic per-cpu area. */
 1214         dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE);
 1215         dpcpu_init((void *)dpcpu, 0);
 1216 
 1217         /* Allocate stacks for all modes */
 1218         irqstack    = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU);
 1219         abtstack    = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU);
  1220         undstack    = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU);
 1221         kernelstack = pmap_preboot_get_vpages(kstack_pages);
 1222 
 1223         /* Allocate message buffer. */
 1224         msgbufp = (void *)pmap_preboot_get_vpages(
 1225             round_page(msgbufsize) / PAGE_SIZE);
 1226 
 1227         /*
 1228          * Pages were allocated during the secondary bootstrap for the
 1229          * stacks for different CPU modes.
 1230          * We must now set the r13 registers in the different CPU modes to
 1231          * point to these stacks.
 1232          * Since the ARM stacks use STMFD etc. we must set r13 to the top end
 1233          * of the stack memory.
 1234          */
 1235         set_stackptrs(0);
 1236         mutex_init();
 1237 
 1238         /* Establish static device mappings. */
 1239         err_devmap = platform_devmap_init();
 1240         devmap_bootstrap(0, NULL);
 1241         vm_max_kernel_address = platform_lastaddr();
 1242 
 1243         /*
  1244          * Only after the SoC register block is mapped can we perform device
 1245          * tree fixups, as they may attempt to read parameters from hardware.
 1246          */
 1247         OF_interpret("perform-fixup", 0);
 1248         platform_gpio_init();
 1249         cninit();
 1250 
 1251         /*
 1252          * If we made a mapping for EARLY_PRINTF after pmap_bootstrap_prepare(),
 1253          * undo it now that the normal console printf works.
 1254          */
 1255 #if defined(EARLY_PRINTF) && defined(SOCDEV_PA) && defined(SOCDEV_VA) && SOCDEV_VA < KERNBASE
 1256         pmap_kremove(SOCDEV_VA);
 1257 #endif
 1258 
 1259         debugf("initarm: console initialized\n");
 1260         debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
 1261         debugf(" boothowto = 0x%08x\n", boothowto);
 1262         debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
 1263         debugf(" lastaddr1: 0x%08x\n", lastaddr);
 1264         arm_print_kenv();
 1265 
 1266         env = kern_getenv("kernelname");
 1267         if (env != NULL)
 1268                 strlcpy(kernelname, env, sizeof(kernelname));
 1269 
 1270         if (err_devmap != 0)
 1271                 printf("WARNING: could not fully configure devmap, error=%d\n",
 1272                     err_devmap);
 1273 
 1274         platform_late_init();
 1275 
  1286         /* Install the undefined-instruction handlers. */
 1287         undefined_init();
 1288         init_proc0(kernelstack);
 1289         arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
 1290         enable_interrupts(PSR_A);
 1291         pmap_bootstrap(0);
 1292 
 1293         /* Exclude the kernel (and all the things we allocated which immediately
 1294          * follow the kernel) from the VM allocation pool but not from crash
 1295          * dumps.  virtual_avail is a global variable which tracks the kva we've
 1296          * "allocated" while setting up pmaps.
 1297          *
 1298          * Prepare the list of physical memory available to the vm subsystem.
 1299          */
 1300         physmem_exclude_region(abp->abp_physaddr,
 1301                 pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC);
 1302         physmem_init_kernel_globals();
 1303 
 1304         init_param2(physmem);
 1305         /* Init message buffer. */
 1306         msgbufinit(msgbufp, msgbufsize);
 1307         dbg_monitor_init();
 1308         arm_kdb_init();
 1309         /* Apply possible BP hardening. */
 1310         cpuinfo_init_bp_hardening();
 1311         return ((void *)STACKALIGN(thread0.td_pcb));
 1313 }
 1314 
 1315 #endif /* __ARM_ARCH < 6 */
 1316 #endif /* FDT */
