FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/locore-v4.S


    1 /*      $NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $      */
    2 
    3 /*-
    4  * Copyright 2011 Semihalf
    5  * Copyright (C) 1994-1997 Mark Brinicombe
    6  * Copyright (C) 1994 Brini
    7  * All rights reserved.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *      This product includes software developed by Brini.
   20  * 4. The name of Brini may not be used to endorse or promote products
   21  *    derived from this software without specific prior written permission.
   22  *
   23  * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
   24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   26  * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   27  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   28  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
   29  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
   30  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
   31  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
   32  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   33  *
   34  */
   35 
   36 #include "assym.s"
   37 #include <sys/syscall.h>
   38 #include <machine/asm.h>
   39 #include <machine/armreg.h>
   40 #include <machine/cpuconf.h>
   41 #include <machine/pte.h>
   42 
   43 __FBSDID("$FreeBSD$");
   44 
    45 /* A 2K initial stack is plenty; it is only used by initarm() */
   46 #define INIT_ARM_STACK_SIZE     2048
   47 
   48 #define CPWAIT_BRANCH                                                    \
   49         sub     pc, pc, #4
   50 
   51 #define CPWAIT(tmp)                                                      \
   52         mrc     p15, 0, tmp, c2, c0, 0  /* arbitrary read of CP15 */    ;\
   53         mov     tmp, tmp                /* wait for it to complete */   ;\
   54         CPWAIT_BRANCH                   /* branch to next insn */
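       /*
        * CPWAIT is the usual ARMv4/v5 idiom for waiting until a CP15 write has
        * taken effect: a dependent read-back of CP15, a mov that stalls on the
        * result, and a pipeline-flushing branch (sub pc, pc, #4 resumes at the
        * next instruction).
        */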
   55 
   56 /*
   57  * This is for libkvm, and should be the address of the beginning
   58  * of the kernel text segment (not necessarily the same as kernbase).
   59  *
   60  * These are being phased out. Newer copies of libkvm don't need these
   61  * values as the information is added to the core file by inspecting
   62  * the running kernel.
   63  */
   64         .text
   65         .align  2
   66 #ifdef PHYSADDR
   67 .globl kernbase
   68 .set kernbase,KERNBASE
   69 .globl physaddr
   70 .set physaddr,PHYSADDR
   71 #endif
   72 
   73 /*
   74  * On entry for FreeBSD boot ABI:
   75  *      r0 - metadata pointer or 0 (boothowto on AT91's boot2)
   76  *      r1 - if (r0 == 0) then metadata pointer
   77  * On entry for Linux boot ABI:
   78  *      r0 - 0
   79  *      r1 - machine type (passed as arg2 to initarm)
    80  *      r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 to initarm)
   81  *
    82  * For both types of boot we gather up the args, put them into a struct
    83  * arm_boot_params and pass that to initarm.
   84  */
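       /*
        * The arm_boot_params structure built on the stack below (see virt_done)
        * is seven 32-bit words: +0 size of the struct (28), +4 r0 from the
        * loader, +8 r1, +12 r2, +16 r3, +20 the physical load address, +24 the
        * physical address of the initial L1 page table.
        */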
   85         .globl  btext
   86 btext:
   87 ASENTRY_NP(_start)
   88         STOP_UNWINDING          /* Can't unwind into the bootloader! */
   89 
   90         mov     r9, r0          /* 0 or boot mode from boot2 */
   91         mov     r8, r1          /* Save Machine type */
   92         mov     ip, r2          /* Save meta data */
   93         mov     fp, r3          /* Future expansion */
   94 
   95         /* Make sure interrupts are disabled. */
   96         mrs     r7, cpsr
   97         orr     r7, r7, #(PSR_I | PSR_F)
   98         msr     cpsr_c, r7
   99 
  100 #if defined (FLASHADDR) && defined(LOADERRAMADDR)
  101 /*
  102  * Sanity check the configuration.
  103  * FLASHADDR and LOADERRAMADDR depend on PHYSADDR in some cases.
  104  * ARMv4 and ARMv5 make assumptions on where they are loaded.
  105  * TODO: Fix the ARMv4/v5 case.
  106  */
  107 #ifndef PHYSADDR
  108 #error PHYSADDR must be defined for this configuration
  109 #endif
  110 
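               /*
                * Roughly: compare the pc against FLASHADDR and the expected RAM
                * address (PHYSADDR with the MMU off, LOADERRAMADDR with it on).
                * If we are not already executing from RAM, memcpy the image
                * (_start.._edata) into RAM and resume at from_ram at its RAM
                * address; otherwise skip the copy.
                */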
  111         /* Check if we're running from flash. */
  112         ldr     r7, =FLASHADDR
  113         /*
  114          * If we're running with MMU disabled, test against the
  115          * physical address instead.
  116          */
  117         mrc     p15, 0, r2, c1, c0, 0
  118         ands    r2, r2, #CPU_CONTROL_MMU_ENABLE
  119         ldreq   r6, =PHYSADDR
  120         ldrne   r6, =LOADERRAMADDR
  121         cmp     r7, r6
  122         bls     flash_lower
  123         cmp     r7, pc
  124         bhi     from_ram
  125         b       do_copy
  126 
  127 flash_lower:
  128         cmp     r6, pc
  129         bls     from_ram
  130 do_copy:
  131         ldr     r7, =KERNBASE
  132         adr     r1, _start
  133         ldr     r0, Lreal_start
  134         ldr     r2, Lend
  135         sub     r2, r2, r0
  136         sub     r0, r0, r7
  137         add     r0, r0, r6
  138         mov     r4, r0
  139         bl      memcpy
  140         ldr     r0, Lram_offset
  141         add     pc, r4, r0
  142 Lram_offset:    .word from_ram-_C_LABEL(_start)
  143 from_ram:
  144         nop
  145 #endif
  146 
  147 disable_mmu:
  148         /* Disable MMU for a while */
  149         mrc     p15, 0, r2, c1, c0, 0
  150         bic     r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\
  151             CPU_CONTROL_WBUF_ENABLE)
  152         bic     r2, r2, #(CPU_CONTROL_IC_ENABLE)
  153         bic     r2, r2, #(CPU_CONTROL_BPRD_ENABLE)
  154         mcr     p15, 0, r2, c1, c0, 0
  155 
  156         nop
  157         nop
  158         nop
  159         CPWAIT(r0)
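               /*
                * From here until the MMU is re-enabled below we run with a flat
                * physical address map and caches off, presumably so that the
                * page-table stores that follow reach memory without any
                * explicit cache cleaning.
                */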
  160 
  161 Lunmapped:
  162         /*
  163          * Build page table from scratch.
  164          */
  165 
   166         /*
   167          * Figure out the physical address we're loaded at. This entry-point
   168          * code is assumed to lie in the first L1 section, so clearing the
   169          * section-offset bits of the pc gives the section-aligned load
   170          * address, which remains in r5 throughout all the following code.
   171          */
  172         ldr     r2, =(L1_S_OFFSET)
  173         bic     r5, pc, r2
  174 
   175         /* Get the physical address of the page table; it stays in r0 throughout. */
  176         adr     r0, Lpagetable
  177         bl      translate_va_to_pa
  178 
  179         /* 
  180          * First map the entire 4GB address space as VA=PA.  It's mapped as
  181          * normal (cached) memory because it's for things like accessing the
  182          * parameters passed in from the bootloader, which might be at any
  183          * physical address, different for every platform.
  184          */
  185         mov     r1, #0
  186         mov     r2, #0
  187         mov     r3, #4096
  188         bl      build_pagetables
  189 
  190         /* 
  191          * Next we do 64MiB starting at the physical load address, mapped to
  192          * the VA the kernel is linked for.
  193          */
  194         mov     r1, r5
  195         ldr     r2, =(KERNVIRTADDR)
  196         mov     r3, #64
  197         bl      build_pagetables
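               /*
                * The L1 table now identity-maps all 4GiB (4096 x 1MiB sections)
                * and also maps 64MiB starting at the physical load address (r5)
                * at the virtual address the kernel is linked for (KERNVIRTADDR).
                */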
  198 
  199         /* Create a device mapping for early_printf if specified. */
  200 #if defined(SOCDEV_PA) && defined(SOCDEV_VA)
  201         ldr     r1, =SOCDEV_PA
  202         ldr     r2, =SOCDEV_VA
  203         mov     r3, #1
  204         bl      build_device_pagetables
  205 #endif
  206 
  207         mcr     p15, 0, r0, c2, c0, 0   /* Set TTB */
  208         mcr     p15, 0, r0, c8, c7, 0   /* Flush TLB */
  209 
  210         /* Set the Domain Access register.  Very important! */
  211         mov     r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
  212         mcr     p15, 0, r0, c3, c0, 0
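               /*
                * DOMAIN_CLIENT means accesses through the domain are checked
                * against the AP bits in the descriptors; a domain left at its
                * "no access" encoding would fault every access once the MMU
                * is enabled.
                */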
  213         /*
  214          * Enable MMU.
  215          */
  216         mrc     p15, 0, r0, c1, c0, 0
  217         orr     r0, r0, #(CPU_CONTROL_MMU_ENABLE)
  218         mcr     p15, 0, r0, c1, c0, 0
  219         nop
  220         nop
  221         nop
  222         CPWAIT(r0)
  223 
  224         /* Transition the PC from physical to virtual addressing. */
  225         ldr     pc,=mmu_done
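               /*
                * The ldr above loads the absolute (virtual) address of mmu_done
                * from a literal pool; everything up to this point is still
                * fetched through the VA=PA identity mapping, which is why both
                * mappings must coexist.
                */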
  226 
  227 mmu_done:
  228         nop
  229         adr     r1, .Lstart
  230         ldmia   r1, {r1, r2, sp}        /* Set initial stack and */
  231         sub     r2, r2, r1              /* get zero init data */
  232         mov     r3, #0
  233 .L1:
   234         str     r3, [r1], #0x0004       /* zero one word of bss */
  235         subs    r2, r2, #4
  236         bgt     .L1
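               /*
                * The loop above zeroes memory from _edata to _ebss one word at
                * a time; those addresses, and the initial sp of svcstk +
                * INIT_ARM_STACK_SIZE, come from the .Lstart literals below.
                */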
  237 
  238 virt_done:
   239         mov     r1, #28                 /* loader info size is 28 bytes; also the second arg */
  240         subs    sp, sp, r1              /* allocate arm_boot_params struct on stack */
  241         mov     r0, sp                  /* loader info pointer is first arg */
  242         bic     sp, sp, #7              /* align stack to 8 bytes */
  243         str     r1, [r0]                /* Store length of loader info */
  244         str     r9, [r0, #4]            /* Store r0 from boot loader */
  245         str     r8, [r0, #8]            /* Store r1 from boot loader */
  246         str     ip, [r0, #12]           /* store r2 from boot loader */
  247         str     fp, [r0, #16]           /* store r3 from boot loader */
  248         str     r5, [r0, #20]           /* store the physical address */
  249         adr     r4, Lpagetable          /* load the pagetable address */
  250         ldr     r5, [r4, #4]
  251         str     r5, [r0, #24]           /* store the pagetable address */
  252         mov     fp, #0                  /* trace back starts here */
  253         bl      _C_LABEL(initarm)       /* Off we go */
  254 
   255         /* initarm() will return the new stack pointer. */
  256         mov     sp, r0
  257 
  258         bl      _C_LABEL(mi_startup)    /* call mi_startup()! */
  259 
  260         adr     r0, .Lmainreturned
  261         b       _C_LABEL(panic)
  262         /* NOTREACHED */
  263 END(_start)
  264 
  265 #define VA_TO_PA_POINTER(name, table)    \
  266 name:                                   ;\
  267         .word   .                       ;\
  268         .word   table
  269 
  270 /*
  271  * Returns the physical address of a magic va to pa pointer.
  272  * r0     - The pagetable data pointer. This must be built using the
  273  *          VA_TO_PA_POINTER macro.
  274  *          e.g.
  275  *            VA_TO_PA_POINTER(Lpagetable, pagetable)
  276  *            ...
  277  *            adr  r0, Lpagetable
  278  *            bl   translate_va_to_pa
  279  *            r0 will now contain the physical address of pagetable
  280  * r1, r2 - Trashed
  281  */
  282 translate_va_to_pa:
  283         ldr     r1, [r0]
  284         sub     r2, r1, r0
  285         /* At this point: r2 = VA - PA */
  286 
  287         /*
  288          * Find the physical address of the table. After these two
  289          * instructions:
  290          * r1 = va(pagetable)
  291          *
  292          * r0 = va(pagetable) - (VA - PA)
  293          *    = va(pagetable) - VA + PA
  294          *    = pa(pagetable)
  295          */
  296         ldr     r1, [r0, #4]
  297         sub     r0, r1, r2
  298         RET
  299 
  300 /*
  301  * Builds the page table
  302  * r0 - The table base address
  303  * r1 - The physical address (trashed)
  304  * r2 - The virtual address (trashed)
  305  * r3 - The number of 1MiB sections
  306  * r4 - Trashed
  307  *
  308  * Addresses must be 1MiB aligned
  309  */
  310 build_device_pagetables:
  311         ldr     r4, =(L1_TYPE_S|L1_S_AP(AP_KRW))
  312         b       1f
  313 build_pagetables:
   314         /* Set the required page attributes */
  315         ldr     r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
  316 1:
  317         orr     r1, r4
  318 
  319         /* Move the virtual address to the correct bit location */
  320         lsr     r2, #(L1_S_SHIFT - 2)
  321 
  322         mov     r4, r3
  323 2:
  324         str     r1, [r0, r2]
  325         add     r2, r2, #4
  326         add     r1, r1, #(L1_S_SIZE)
  327         adds    r4, r4, #-1
  328         bhi     2b
  329 
  330         RET
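       /*
        * Each iteration above stores one 4-byte L1 section descriptor: r1 holds
        * the section base address OR'd with the type/AP(/C) bits and r2 is the
        * byte offset into the table, i.e. (VA >> L1_S_SHIFT) * 4, hence the
        * shift by L1_S_SHIFT - 2.  The loop then advances the physical address
        * by one section (L1_S_SIZE) and the offset by 4, r3 times.
        */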
  331 
  332 VA_TO_PA_POINTER(Lpagetable, pagetable)
  333 
  334 Lreal_start:
  335         .word   _start
  336 Lend:
  337         .word   _edata
  338 
  339 .Lstart:
  340         .word   _edata
  341         .word   _ebss
  342         .word   svcstk + INIT_ARM_STACK_SIZE
  343 
  344 .Lvirt_done:
  345         .word   virt_done
  346 
  347 .Lmainreturned:
  348         .asciz  "main() returned"
  349         .align  2
  350 
  351         .bss
  352 svcstk:
  353         .space  INIT_ARM_STACK_SIZE
  354 
  355 /*
   356  * Memory for the initial pagetable. We cannot place this in the bss
   357  * because the bss is zeroed after this table is already in use.
  358  */
  359         .section ".init_pagetable"
  360         .align  14 /* 16KiB aligned */
  361 pagetable:
  362         .space  L1_TABLE_SIZE
  363 
  364         .text
  365         .align  2
  366 
  367 .Lcpufuncs:
  368         .word   _C_LABEL(cpufuncs)
  369 
  370 ENTRY_NP(cpu_halt)
  371         mrs     r2, cpsr
  372         bic     r2, r2, #(PSR_MODE)
  373         orr     r2, r2, #(PSR_SVC32_MODE)
  374         orr     r2, r2, #(PSR_I | PSR_F)
  375         msr     cpsr_fsxc, r2
  376 
  377         ldr     r4, .Lcpu_reset_address
  378         ldr     r4, [r4]
  379 
  380         ldr     r0, .Lcpufuncs
  381         mov     lr, pc
  382         ldr     pc, [r0, #CF_IDCACHE_WBINV_ALL]
  383         mov     lr, pc
  384         ldr     pc, [r0, #CF_L2CACHE_WBINV_ALL]
  385 
  386         /*
   387          * Load the cpu_reset_needs_v4_MMU_disable flag to determine whether
   388          * the ARMv4-only TLB flush below may be executed.
  389          */
  390 
  391         ldr     r1, .Lcpu_reset_needs_v4_MMU_disable
  392         ldr     r1, [r1]
  393         cmp     r1, #0
  394         mov     r2, #0
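               /*
                * The condition flags from the cmp above are still live here:
                * the mcrne below (the ARMv4 TLB flush) executes only when
                * cpu_reset_needs_v4_MMU_disable is non-zero.
                */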
  395 
  396         /*
  397          * MMU & IDC off, 32 bit program & data space
  398          * Hurl ourselves into the ROM
  399          */
  400         mov     r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE)
  401         mcr     p15, 0, r0, c1, c0, 0
  402         mcrne   p15, 0, r2, c8, c7, 0   /* nail I+D TLB on ARMv4 and greater */
  403         mov     pc, r4
  404 
  405         /*
  406          * _cpu_reset_address contains the address to branch to, to complete
   407          * the cpu reset after turning the MMU off.
   408          * This variable is provided by the hardware-specific code.
  409          */
  410 .Lcpu_reset_address:
  411         .word   _C_LABEL(cpu_reset_address)
  412 
  413         /*
   414          * cpu_reset_needs_v4_MMU_disable is a flag that indicates whether the
   415          * ARMv4 MMU-disable instruction needs to be executed; it is an illegal
   416          * instruction on e.g. ARM6/7 and locks up the machine in an endless
   417          * illegal-instruction / data-abort / reset loop.
  418          */
  419 .Lcpu_reset_needs_v4_MMU_disable:
  420         .word   _C_LABEL(cpu_reset_needs_v4_MMU_disable)
  421 END(cpu_halt)
  422 
  423 
  424 /*
   425  * setjmp + longjmp
  426  */
  427 ENTRY(setjmp)
  428         stmia   r0, {r4-r14}
  429         mov     r0, #0x00000000
  430         RET
  431 END(setjmp)
  432 
  433 ENTRY(longjmp)
  434         ldmia   r0, {r4-r14}
  435         mov     r0, #0x00000001
  436         RET
  437 END(longjmp)
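       /*
        * stmia r0, {r4-r14} saves the callee-saved registers plus sp and lr
        * into the buffer at r0; longjmp restores the same set, so its RET
        * (through the restored lr) returns to setjmp's caller with r0 = 1
        * instead of 0.
        */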
  438 
  439         .data
  440         .global _C_LABEL(esym)
  441 _C_LABEL(esym): .word   _C_LABEL(end)
  442 
  443 ENTRY_NP(abort)
  444         b       _C_LABEL(abort)
  445 END(abort)
  446 
  447 ENTRY_NP(sigcode)
  448         mov     r0, sp
  449         add     r0, r0, #SIGF_UC
  450 
  451         /*
  452          * Call the sigreturn system call.
  453          *
  454          * We have to load r7 manually rather than using
  455          * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
  456          * correct. Using the alternative places esigcode at the address
  457          * of the data rather than the address one past the data.
  458          */
  459 
  460         ldr     r7, [pc, #12]   /* Load SYS_sigreturn */
  461         swi     SYS_sigreturn
  462 
   463         /* Well, if that failed we had better exit quickly! */
  464 
  465         ldr     r7, [pc, #8]    /* Load SYS_exit */
  466         swi     SYS_exit
  467 
  468         /* Branch back to retry SYS_sigreturn */
  469         b       . - 16
  470 END(sigcode)
  471         .word   SYS_sigreturn
  472         .word   SYS_exit
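       /*
        * The pc-relative loads above work out as follows: when the first ldr
        * executes, pc reads as its own address + 8, so [pc, #12] hits the
        * .word SYS_sigreturn 20 bytes in; likewise the second ldr picks up
        * .word SYS_exit, and "b . - 16" branches back to the first ldr to
        * retry SYS_sigreturn.
        */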
  473 
  474         .align  2
  475         .global _C_LABEL(esigcode)
  476                 _C_LABEL(esigcode):
  477 
  478         .data
  479         .global szsigcode
  480 szsigcode:
  481         .long esigcode-sigcode
  482 
  483 /* End of locore.S */

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.