The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/locore-v6.S

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright 2004-2014 Olivier Houchard <cognet@FreeBSD.org>
    3  * Copyright 2012-2014 Ian Lepore <ian@FreeBSD.org>
    4  * Copyright 2013-2014 Andrew Turner <andrew@FreeBSD.org>
    5  * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
    6  * Copyright 2014 Michal Meloun <meloun@miracle.cz>
    7  * All rights reserved.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28  * SUCH DAMAGE.
   29  */
   30 
   31 #include "assym.s"
   32 #include <sys/syscall.h>
   33 #include <machine/asm.h>
   34 #include <machine/asmacros.h>
   35 #include <machine/armreg.h>
   36 #include <machine/sysreg.h>
   37 #include <machine/cpuconf.h>
   38 #include <machine/pte.h>
   39 
   40 __FBSDID("$FreeBSD$");
   41 
#ifndef ARM_NEW_PMAP
/*
 * Compatibility shims: when the new pmap is not enabled, map the new-style
 * PTE1 (1 MiB section) names onto the classic L1 section macros so the
 * page-table code below can be written once in terms of PTE1_*.
 */
#define PTE1_OFFSET     L1_S_OFFSET
#define PTE1_SHIFT      L1_S_SHIFT
#define PTE1_SIZE       L1_S_SIZE
#endif

/* A small statically-allocated stack used only during initarm() and AP startup. */
#define INIT_ARM_STACK_SIZE     2048

        .text
        .align  2
   53 
   54 /*
   55  * On entry for FreeBSD boot ABI:
   56  *      r0 - metadata pointer or 0 (boothowto on AT91's boot2)
   57  *      r1 - if (r0 == 0) then metadata pointer
   58  * On entry for Linux boot ABI:
   59  *      r0 - 0
   60  *      r1 - machine type (passed as arg2 to initarm)
   61  *      r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm)
   62  *
   63  * For both types of boot we gather up the args, put them in a struct arm_boot_params
   64  * structure and pass that to initarm.
   65  */
   66         .globl  btext
   67 btext:
   68 ASENTRY_NP(_start)
   69         STOP_UNWINDING          /* Can't unwind into the bootloader! */
   70 
   71         /* Make sure interrupts are disabled. */
   72         cpsid   ifa
   73 
   74         mov     r8, r0          /* 0 or boot mode from boot2 */
   75         mov     r9, r1          /* Save Machine type */
   76         mov     r10, r2         /* Save meta data */
   77         mov     r11, r3         /* Future expansion */
   78 
   79         /*
   80          * Check whether data cache is enabled.  If it is, then we know
   81          * current tags are valid (not power-on garbage values) and there
   82          * might be dirty lines that need cleaning.  Disable cache to prevent
   83          * new lines being allocated, then call wbinv_poc_all to clean it.
   84          */
   85         mrc     CP15_SCTLR(r7)
   86         tst     r7, #CPU_CONTROL_DC_ENABLE
   87         blne    dcache_wbinv_poc_all
   88 
   89         /* ! Do not write to memory between wbinv and disabling cache ! */
   90 
   91         /*
   92          * Now there are no dirty lines, but there may still be lines marked
   93          * valid.  Disable all caches and the MMU, and invalidate everything
   94          * before setting up new page tables and re-enabling the mmu.
   95          */
   96 1:
   97         bic     r7, #CPU_CONTROL_DC_ENABLE
   98         bic     r7, #CPU_CONTROL_MMU_ENABLE
   99         bic     r7, #CPU_CONTROL_IC_ENABLE
  100         bic     r7, #CPU_CONTROL_UNAL_ENABLE
  101         bic     r7, #CPU_CONTROL_BPRD_ENABLE
  102         bic     r7, #CPU_CONTROL_SW_ENABLE
  103         orr     r7, #CPU_CONTROL_AFLT_ENABLE
  104         orr     r7, #CPU_CONTROL_VECRELOC
  105         mcr     CP15_SCTLR(r7)
  106         DSB
  107         ISB
  108         bl      dcache_inv_poc_all
  109         mcr     CP15_ICIALLU
  110         DSB
  111         ISB
  112 
  113         /*
  114          * Build page table from scratch.
  115          */
  116 
  117         /* 
  118          * Figure out the physical address we're loaded at by assuming this
  119          * entry point code is in the first L1 section and so if we clear the
  120          * offset bits of the pc that will give us the section-aligned load
  121          * address, which remains in r5 throughout all the following code.
  122          */
  123         ldr     r2, =(L1_S_OFFSET)
  124         bic     r5, pc, r2
  125 
  126         /* Find the delta between VA and PA, result stays in r0 throughout. */
  127         adr     r0, Lpagetable
  128         bl      translate_va_to_pa
  129 
  130         /* 
  131          * First map the entire 4GB address space as VA=PA.  It's mapped as
  132          * normal (cached) memory because it's for things like accessing the
  133          * parameters passed in from the bootloader, which might be at any
  134          * physical address, different for every platform.
  135          */
  136         mov     r1, #0
  137         mov     r2, #0
  138         mov     r3, #4096
  139         bl      build_pagetables
  140 
  141         /* 
  142          * Next we do 64MiB starting at the physical load address, mapped to
  143          * the VA the kernel is linked for.
  144          */
  145         mov     r1, r5
  146         ldr     r2, =(KERNVIRTADDR)
  147         mov     r3, #64
  148         bl      build_pagetables
  149 
  150         /* Create a device mapping for early_printf if specified. */
  151 #if defined(SOCDEV_PA) && defined(SOCDEV_VA)
  152         ldr     r1, =SOCDEV_PA
  153         ldr     r2, =SOCDEV_VA
  154         mov     r3, #1
  155         bl      build_device_pagetables
  156 #endif
  157         bl      init_mmu
  158 
  159         /* Transition the PC from physical to virtual addressing. */
  160         ldr     pc, =1f
  161 1:
  162 
  163         /* Setup stack, clear BSS */
  164         ldr     r1, =.Lstart
  165         ldmia   r1, {r1, r2, sp}        /* Set initial stack and */
  166         add     sp, sp, #INIT_ARM_STACK_SIZE
  167         sub     r2, r2, r1              /* get zero init data */
  168         mov     r3, #0
  169 2:
  170         str     r3, [r1], #0x0004       /* get zero init data */
  171         subs    r2, r2, #4
  172         bgt     2b
  173 
  174         mov     r1, #28                 /* loader info size is 28 bytes also second arg */
  175         subs    sp, sp, r1              /* allocate arm_boot_params struct on stack */
  176         mov     r0, sp                  /* loader info pointer is first arg */
  177         bic     sp, sp, #7              /* align stack to 8 bytes */
  178         str     r1, [r0]                /* Store length of loader info */
  179         str     r8, [r0, #4]            /* Store r0 from boot loader */
  180         str     r9, [r0, #8]            /* Store r1 from boot loader */
  181         str     r10, [r0, #12]          /* store r2 from boot loader */
  182         str     r11, [r0, #16]          /* store r3 from boot loader */
  183         str     r5, [r0, #20]           /* store the physical address */
  184         adr     r4, Lpagetable          /* load the pagetable address */
  185         ldr     r5, [r4, #4]
  186         str     r5, [r0, #24]           /* store the pagetable address */
  187         mov     fp, #0                  /* trace back starts here */
  188         bl      _C_LABEL(initarm)       /* Off we go */
  189 
  190         /* init arm will return the new stack pointer. */
  191         mov     sp, r0
  192 
  193         bl      _C_LABEL(mi_startup)    /* call mi_startup()! */
  194 
  195         ldr     r0, =.Lmainreturned
  196         b       _C_LABEL(panic)
  197         /* NOTREACHED */
  198 END(_start)
  199 
/*
 * Emit a two-word "magic pointer": the first word is the link-time (virtual)
 * address of the pointer itself (`.` = here), the second is the virtual
 * address of `table`.  translate_va_to_pa uses the difference between the
 * first word and the pointer's run-time (physical) address to compute the
 * VA->PA delta before the MMU is on.
 */
#define VA_TO_PA_POINTER(name, table)    \
name:                                   ;\
        .word   .                       ;\
        .word   table

/*
 * Returns the physical address of a magic va to pa pointer.
 * r0     - The pagetable data pointer. This must be built using the
 *          VA_TO_PA_POINTER macro.
 *          e.g.
 *            VA_TO_PA_POINTER(Lpagetable, pagetable)
 *            ...
 *            adr  r0, Lpagetable
 *            bl   translate_va_to_pa
 *            r0 will now contain the physical address of pagetable
 * r1, r2 - Trashed
 */
translate_va_to_pa:
        ldr     r1, [r0]                /* r1 = VA of pointer; r0 = its PA (adr) */
        sub     r2, r1, r0
        /* At this point: r2 = VA - PA */

        /*
         * Find the physical address of the table. After these two
         * instructions:
         * r1 = va(pagetable)
         *
         * r0 = va(pagetable) - (VA - PA)
         *    = va(pagetable) - VA + PA
         *    = pa(pagetable)
         */
        ldr     r1, [r0, #4]
        sub     r0, r1, r2
        mov     pc, lr
  234 
  235 /*
  236  * Init MMU
  237  * r0 - the table base address
  238  */
  239 
  240 ASENTRY_NP(init_mmu)
  241 
  242         /* Setup TLB and MMU registers */
  243         mcr     CP15_TTBR0(r0)          /* Set TTB */
  244         mov     r0, #0
  245         mcr     CP15_CONTEXTIDR(r0)     /* Set ASID to 0 */
  246 
  247         /* Set the Domain Access register */
  248         mov     r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
  249         mcr     CP15_DACR(r0)
  250 
  251 #ifdef ARM_NEW_PMAP
  252         /*
  253          * Set TEX remap registers
  254          *  - All is set to uncacheable memory
  255          */
  256         ldr     r0, =0xAAAAA
  257         mrc     CP15_PRRR(r0)
  258         mov     r0, #0
  259         mcr     CP15_NMRR(r0)
  260 #endif
  261         mcr     CP15_TLBIALL            /* Flush TLB */
  262         DSB
  263         ISB
  264 
  265         /* Enable MMU */
  266         mrc     CP15_SCTLR(r0)
  267         orr     r0, r0, #CPU_CONTROL_MMU_ENABLE
  268         orr     r0, r0, #CPU_CONTROL_V6_EXTPAGE
  269 #ifdef ARM_NEW_PMAP
  270         orr     r0, r0, #CPU_CONTROL_TR_ENABLE
  271 #endif
  272         orr     r0, r0, #CPU_CONTROL_AF_ENABLE
  273         mcr     CP15_SCTLR(r0)
  274         DSB
  275         ISB
  276         mcr     CP15_TLBIALL            /* Flush TLB */
  277         mcr     CP15_BPIALL             /* Flush Branch predictor */
  278         DSB
  279         ISB
  280 
  281         mov     pc, lr
  282 END(init_mmu)
  283 
  284 
  285 /*
  286  * Init SMP coherent mode, enable caching and switch to final MMU table.
  287  * Called with disabled caches
  288  * r0 - The table base address
  289  * r1 - clear bits for aux register
  290  * r2 - set bits for aux register
  291  */
  292 ASENTRY_NP(reinit_mmu)
  293         push    {r4-r11, lr}
  294         mov     r4, r0
  295         mov     r5, r1
  296         mov     r6, r2
  297 
  298         /* !! Be very paranoid here !! */
  299         /* !! We cannot write single bit here !! */
  300 
  301 #if 0   /* XXX writeback shouldn't be necessary */
  302         /* Write back and invalidate all integrated caches */
  303         bl      dcache_wbinv_poc_all
  304 #else
  305         bl      dcache_inv_pou_all
  306 #endif
  307         mcr     CP15_ICIALLU
  308         DSB
  309         ISB
  310 
  311         /* Set auxiliary register */
  312         mrc     CP15_ACTLR(r7)
  313         bic     r8, r7, r5              /* Mask bits */
  314         eor     r8, r8, r6              /* Set bits */
  315         teq     r7, r8
  316         mcrne   CP15_ACTLR(r8)
  317         DSB
  318         ISB
  319 
  320         /* Enable caches. */
  321         mrc     CP15_SCTLR(r7)
  322         orr     r7, #CPU_CONTROL_DC_ENABLE
  323         orr     r7, #CPU_CONTROL_IC_ENABLE
  324         orr     r7, #CPU_CONTROL_BPRD_ENABLE
  325         mcr     CP15_SCTLR(r7)
  326         DSB
  327 
  328         mcr     CP15_TTBR0(r4)          /* Set new TTB */
  329         DSB
  330         ISB
  331 
  332         mcr     CP15_TLBIALL            /* Flush TLB */
  333         mcr     CP15_BPIALL             /* Flush Branch predictor */
  334         DSB
  335         ISB
  336 
  337 #if 0 /* XXX writeback shouldn't be necessary */
  338         /* Write back and invalidate all integrated caches */
  339         bl      dcache_wbinv_poc_all
  340 #else
  341         bl      dcache_inv_pou_all
  342 #endif
  343         mcr     CP15_ICIALLU
  344         DSB
  345         ISB
  346 
  347         pop     {r4-r11, pc}
  348 END(reinit_mmu)
  349 
  350 
  351 /*
  352  * Builds the page table
  353  * r0 - The table base address
  354  * r1 - The physical address (trashed)
  355  * r2 - The virtual address (trashed)
  356  * r3 - The number of 1MiB sections
  357  * r4 - Trashed
  358  *
  359  * Addresses must be 1MiB aligned
  360  */
  361 build_device_pagetables:
  362 #if defined(ARM_NEW_PMAP)
  363         ldr     r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
  364 #elif defined(SMP)
  365         ldr     r4, =(L1_TYPE_S|L1_S_AP(AP_KRW)|L1_SHARED)
  366 #else
  367         ldr     r4, =(L1_TYPE_S|L1_S_AP(AP_KRW))
  368 #endif
  369         b       1f
  370 build_pagetables:
  371         /* Set the required page attributed */
  372 #if defined(ARM_NEW_PMAP)
  373         ldr     r4, =PTE1_V|PTE1_A|PTE1_AP_KRW|TEX1_CLASS_0
  374 #elif defined(SMP)
  375         ldr     r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)|L1_SHARED)
  376 #else
  377         ldr     r4, =(L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW))
  378 #endif
  379 1:
  380         orr     r1, r4
  381 
  382         /* Move the virtual address to the correct bit location */
  383         lsr     r2, #(PTE1_SHIFT - 2)
  384 
  385         mov     r4, r3
  386 2:
  387         str     r1, [r0, r2]
  388         add     r2, r2, #4
  389         add     r1, r1, #(PTE1_SIZE)
  390         adds    r4, r4, #-1
  391         bhi     2b
  392 
  393         mov     pc, lr
  394 
  395 VA_TO_PA_POINTER(Lpagetable, boot_pt1)
  396 
  397 
.Lstart:
        .word   _edata                  /* Note that these three items are */
        .word   _ebss                   /* loaded with a single ldmia and */
        .word   svcstk                  /* must remain in order together. */

.Lmainreturned:
        .asciz  "main() returned"
        .align  2

        .bss
/* Per-CPU startup stacks: CPU n uses svcstk + n * INIT_ARM_STACK_SIZE. */
svcstk:
        .space  INIT_ARM_STACK_SIZE * MAXCPU

/*
 * Memory for the initial pagetable. We are unable to place this in
 * the bss as this will be cleared after the table is loaded.
 */
        .section ".init_pagetable"
        .align  14 /* 16KiB aligned */
        .globl  boot_pt1
boot_pt1:
        .space  L1_TABLE_SIZE

        .text
        .align  2

/* Literal holding the address of the cpufuncs dispatch table. */
.Lcpufuncs:
        .word   _C_LABEL(cpufuncs)
  426 
#if defined(SMP)

/*
 * Secondary-CPU (AP) entry point.  Mirrors the start of _start: disable
 * everything, invalidate caches, turn on the MMU using the same boot page
 * table, pick this CPU's startup stack, then call init_secondary().
 */
ASENTRY_NP(mpentry)
        /* Make sure interrupts are disabled. */
        cpsid   ifa

        /* Setup core, disable all caches. */
        mrc     CP15_SCTLR(r0)
        bic     r0, #CPU_CONTROL_MMU_ENABLE
        bic     r0, #CPU_CONTROL_DC_ENABLE
        bic     r0, #CPU_CONTROL_IC_ENABLE
        bic     r0, #CPU_CONTROL_UNAL_ENABLE
        bic     r0, #CPU_CONTROL_BPRD_ENABLE
        bic     r0, #CPU_CONTROL_SW_ENABLE
        orr     r0, #CPU_CONTROL_AFLT_ENABLE
        orr     r0, #CPU_CONTROL_VECRELOC
        mcr     CP15_SCTLR(r0)
        DSB
        ISB

        /* Invalidate L1 cache I+D cache */
        bl      dcache_inv_pou_all
        mcr     CP15_ICIALLU
        DSB
        ISB

        /* Find the delta between VA and PA */
        adr     r0, Lpagetable
        bl      translate_va_to_pa

        bl      init_mmu

        adr     r1, .Lstart+8           /* Get initstack pointer from */
        ldr     sp, [r1]                /* startup data. */
        mrc     CP15_MPIDR(r0)          /* Get processor id number. */
        and     r0, r0, #0x0f           /* low 4 bits = CPU id within cluster */
        mov     r1, #INIT_ARM_STACK_SIZE
        mul     r2, r1, r0              /* Point sp to initstack */
        add     sp, sp, r2              /* area for this processor. */

        /* Switch to virtual addresses. */
        ldr     pc, =1f
1:
        mov     fp, #0                  /* trace back starts here */
        bl      _C_LABEL(init_secondary)/* Off we go, cpu id in r0. */

        /* init_secondary() must never return; panic if it does. */
        adr     r0, .Lmpreturned
        b       _C_LABEL(panic)
        /* NOTREACHED */
END(mpentry)

.Lmpreturned:
        .asciz  "init_secondary() returned"
        .align  2
#endif
  482 
/*
 * Halt the CPU: flush caches, then either jump to a platform-provided reset
 * routine (if cpu_reset_address is set) or spin in WFI forever.
 * Does not return.
 */
ENTRY_NP(cpu_halt)

        /* XXX re-implement !!! */
        cpsid   ifa
        bl      dcache_wbinv_poc_all

        ldr     r4, .Lcpu_reset_address
        ldr     r4, [r4]
        teq     r4, #0
        movne   pc, r4                  /* tail-jump to platform reset code */
1:
        WFI                             /* low-power wait; loop on spurious wake */
        b       1b

        /*
         * _cpu_reset_address contains the address to branch to, to complete
         * the cpu reset after turning the MMU off
         * This variable is provided by the hardware specific code
         */
.Lcpu_reset_address:
        .word   _C_LABEL(cpu_reset_address)
END(cpu_halt)
  505 
  506 
  507 /*
  508  * setjump + longjmp
  509  */
  510 ENTRY(setjmp)
  511         stmia   r0, {r4-r14}
  512         mov     r0, #0x00000000
  513         RET
  514 END(setjmp)
  515 
  516 ENTRY(longjmp)
  517         ldmia   r0, {r4-r14}
  518         mov     r0, #0x00000001
  519         RET
  520 END(longjmp)
  521 
        .data
        /* esym: end-of-symbol-table marker, initialized to the kernel's end. */
        .global _C_LABEL(esym)
_C_LABEL(esym): .word   _C_LABEL(end)

/* abort: deliberate infinite loop (branch-to-self); should never be reached. */
ENTRY_NP(abort)
        b       _C_LABEL(abort)
END(abort)
  529 
/*
 * Signal trampoline, copied onto the user stack.  Invokes SYS_sigreturn with
 * a pointer to the ucontext; falls back to SYS_exit if that fails.  The
 * pc-relative literal loads below depend on the exact instruction layout —
 * do not reorder or insert instructions here.
 */
ENTRY_NP(sigcode)
        mov     r0, sp
        add     r0, r0, #SIGF_UC        /* r0 = &frame->sf_uc */

        /*
         * Call the sigreturn system call.
         *
         * We have to load r7 manually rather than using
         * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is
         * correct. Using the alternative places esigcode at the address
         * of the data rather than the address one past the data.
         */

        ldr     r7, [pc, #12]   /* Load SYS_sigreturn */
        swi     SYS_sigreturn

        /* Well if that failed we better exit quick ! */

        ldr     r7, [pc, #8]    /* Load SYS_exit */
        swi     SYS_exit

        /* Branch back to retry SYS_sigreturn */
        b       . - 16
END(sigcode)
        /* Syscall numbers read by the pc-relative loads above. */
        .word   SYS_sigreturn
        .word   SYS_exit

        .align  2
        .global _C_LABEL(esigcode)
                _C_LABEL(esigcode):

        .data
        /* szsigcode: byte length of the trampoline, used when copying it out. */
        .global szsigcode
szsigcode:
        .long esigcode-sigcode

/* End of locore.S */

Cache object: f4fe4b4dafd5692ee326bcdbb86a2860


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.