FreeBSD/Linux Kernel Cross Reference
sys/powerpc/aim/locore64.S

/* $FreeBSD$ */

/*-
 * Copyright (C) 2010-2016 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "assym.inc"

#include <sys/syscall.h>

#include <machine/trap.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/asm.h>
#include <machine/vmparam.h>

#ifdef _CALL_ELF
.abiversion _CALL_ELF
#endif

/* Glue for linker script */
.globl  kernbase
.set    kernbase, KERNBASE

/*
 * Globals
 */
        .data
        .align 3
GLOBAL(__startkernel)
        .llong  begin
GLOBAL(__endkernel)
        .llong  end
GLOBAL(can_wakeup)
        .llong  0x0

        .align  4
#define TMPSTKSZ        16384           /* 16K temporary stack */
GLOBAL(tmpstk)
        .space  TMPSTKSZ

TOC_ENTRY(tmpstk)
TOC_ENTRY(can_wakeup)
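
/*
 * TOC_ENTRY() (from machine/asm.h) is assumed here to reserve a TOC slot
 * for the named symbol, so later code can reach it with a single
 * TOC-relative ld off %r2 instead of materializing a 64-bit absolute
 * address.
 */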

#ifdef KDB
#define TRAPSTKSZ       8192            /* 8K trap stack */
GLOBAL(trapstk)
        .space        TRAPSTKSZ
TOC_ENTRY(trapstk)
#endif


/*
 * Entry point for bootloaders that do not fully implement ELF and start
 * at the beginning of the image (kexec, notably). In its own section so
 * that it ends up before any linker-generated call stubs and actually at
 * the beginning of the image. kexec on some systems also enters at
 * (start of image) + 0x60, so put a spin loop there.
 */
        .section ".text.kboot", "x", @progbits
kbootentry:
#ifdef __LITTLE_ENDIAN__
        RETURN_TO_NATIVE_ENDIAN
#endif
        b __start
. = kbootentry + 0x40   /* Magic address used in platform layer */
        .global smp_spin_sem
ap_kexec_spin_sem:
        .long   -1
. = kbootentry + 0x60   /* Entry point for kexec APs */
ap_kexec_start:         /* At 0x60 past start, copied to 0x60 by kexec */
        /* r3 set to CPU ID by kexec */

        /* Invalidate icache for low-memory copy and jump there */
        li      %r0,0x80
        dcbst   0,%r0
        sync
        icbi    0,%r0
        isync
        ba      0x80                    /* Absolute branch to next inst */
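
        /*
         * The dcbst/sync/icbi/isync sequence above is the architected
         * PowerPC recipe for making freshly copied instructions visible
         * to instruction fetch: write the data cache line back, order
         * the store, invalidate the stale icache line, then discard any
         * prefetched instructions.
         */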

. = kbootentry + 0x80                   /* Aligned to cache line */
1:      or      31,31,31                /* yield */
        sync
        lwz     %r1,0x40(0)             /* Spin on ap_kexec_spin_sem */
        cmpw    %r1,%r3                 /* Until it equals our CPU ID */
        bne     1b

        /* Released */
        or      2,2,2                   /* unyield */

        /* Make this look like a software reset: clear SRR1 */
        li      %r1,0
        mtsrr1  %r1
        ba      EXC_RST
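
/*
 * Release protocol for the kexec APs, sketched as illustrative C (the
 * constants mirror the offsets above; the releasing side lives in the
 * platform SMP code):
 *
 *	volatile int32_t *sem = (int32_t *)0x40;  // low-memory smp_spin_sem
 *	*sem = cpu_id;		// AP with this ID stops spinning
 *
 * Each AP spins at low priority (or 31,31,31) until the semaphore equals
 * its CPU ID, restores priority, clears SRR1 so the entry is treated as
 * a software reset, and branches to the reset vector.
 */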

/*
 * Now start the real text section
 */

        .text
        .globl  btext
btext:

/*
 * Main kernel entry point.
 *
 * Calling convention:
 * r3: Flattened Device Tree pointer (or zero)
 * r4: ignored
 * r5: OF client interface pointer (or zero)
 * r6: Loader metadata pointer (or zero)
 * r7: Magic cookie (0xfb5d104d) to indicate that r6 has loader metadata
 */
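
/*
 * Seen from a loader, the entry point behaves roughly like this C
 * prototype (illustrative only; the parameter names are assumptions):
 *
 *	void __start(void *fdt, void *unused, void *ofw_entry, void *mdp,
 *	    uint32_t mdp_cookie);
 *
 * where mdp should only be trusted if mdp_cookie == 0xfb5d104d.
 */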
        .text
_NAKED_ENTRY(__start)

#ifdef  __LITTLE_ENDIAN__
        RETURN_TO_NATIVE_ENDIAN
#endif
        /* Set 64-bit mode if not yet set before branching to C */
        mfmsr   %r20
        li      %r21,1
        insrdi  %r20,%r21,1,0
        mtmsrd  %r20
        isync
        nop     /* Make this block a multiple of 8 bytes */
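
        /*
         * insrdi %r20,%r21,1,0 inserts the low bit of %r21 (a 1) at MSR
         * bit 0, i.e. it sets MSR[SF]; the mtmsrd/isync pair then puts
         * the CPU into 64-bit addressing mode.
         */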

        /* Set up the TOC pointer */
        b       0f
        .align 3
0:      nop
        bl      1f
        .llong  __tocbase + 0x8000 - .
1:      mflr    %r2
        ld      %r1,0(%r2)
        add     %r2,%r1,%r2
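
        /*
         * Position-independent TOC discovery: bl 1f leaves the address
         * of the .llong in LR, and the .llong holds the link-time
         * distance from itself to __tocbase + 0x8000.  Adding the two
         * yields the run-time TOC pointer no matter where the kernel
         * was loaded.  The 0x8000 bias lets signed 16-bit displacements
         * span the whole 64KB TOC.
         */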

        /* Get load offset */
        ld      %r31,-0x8000(%r2) /* First TOC entry is TOC base */
        subf    %r31,%r31,%r2   /* Subtract from real TOC base to get base */
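
        /*
         * The slot at -0x8000(%r2) is the first TOC entry, assumed to be
         * filled by the linker with the link-time TOC pointer; run-time
         * minus link-time value leaves the load offset in %r31, used as
         * the relocation base below.
         */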

        /* Set up the stack pointer */
        bl      1f
        .llong  tmpstk + TMPSTKSZ - 96 - .
1:      mflr    %r30
        ld      %r1,0(%r30)
        add     %r1,%r1,%r30
        nop

        /* Relocate kernel */
        std     %r3,48(%r1)
        std     %r4,56(%r1)
        std     %r5,64(%r1)
        std     %r6,72(%r1)
        std     %r7,80(%r1)

        bl      1f
        .llong _DYNAMIC-.
1:      mflr    %r3
        ld      %r4,0(%r3)
        add     %r3,%r4,%r3
        mr      %r4,%r31
        bl      elf_reloc_self
        nop
        ld      %r3,48(%r1)
        ld      %r4,56(%r1)
        ld      %r5,64(%r1)
        ld      %r6,72(%r1)
        ld      %r7,80(%r1)
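
        /*
         * elf_reloc_self() applies the kernel's own (typically
         * R_PPC64_RELATIVE) relocations from _DYNAMIC using the load
         * offset computed earlier; its assumed C shape is roughly:
         *
         *	void elf_reloc_self(Elf_Dyn *dynp, Elf_Addr relocbase);
         *
         * The entry arguments in r3-r7 are spilled around the call
         * because it follows the C ABI and may clobber them.
         */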

        /* Begin CPU init */
        mr      %r4,%r2 /* Replace ignored r4 with tocbase for trap handlers */
        bl      powerpc_init
        nop

        /* Set stack pointer to new value and branch to mi_startup */
        mr      %r1, %r3
        li      %r3, 0
        std     %r3, 0(%r1)
        bl      mi_startup
        nop
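
        /*
         * powerpc_init() hands back the initial kernel stack in r3; the
         * zero stored at 0(%r1) terminates the ABI stack back-chain so
         * unwinders stop here, and mi_startup() runs the
         * machine-independent boot sequence and never returns.
         */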

        /* Unreachable */
        b       .
_END(__start)

ASENTRY_NOPROF(__restartkernel_virtual)
        /*
         * When coming in via this entry point, we need to alter the SLB to
         * shadow the segment register emulation entries in DMAP space.
         * We need to do this dance because we are running with virtual-mode
         * OpenFirmware and have not yet taken over the MMU.
         *
         * Assumptions:
         * 1) The kernel is currently identity-mapped.
         * 2) We are currently executing at an address compatible with
         *    real mode.
         * 3) The first 16 SLB entries are emulating SRs.
         * 4) The rest of the SLB is not in use.
         * 5) OpenFirmware is not manipulating the SLB at runtime.
         * 6) We are running on 64-bit AIM.
         *
         * Tested on a G5.
         */
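
        /*
         * Net effect, as an illustrative before/after sketch (n < 16;
         * the 0xc... base matches %r18 below):
         *
         *	SLB[n]:    ESID n               -> VSID v  (SR emulation)
         *	SLB[n+16]: ESID 0xc00000000 + n -> VSID v  (DMAP shadow)
         */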
        mfmsr   %r14
        /* Switch to real mode because we are about to mess with the SLB. */
        andi.   %r14, %r14, ~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l
        mtmsr   %r14
        isync
        /* Prepare variables for later use. */
        li      %r14, 0
        li      %r18, 0
        oris    %r18, %r18, 0xc000
        sldi    %r18, %r18, 32          /* r18: 0xc000000000000000 */
1:
        /*
         * Loop over the first 16 SLB entries.
         * Offset the SLBE into the DMAP, add 16 to the index, and write
         * it back to the SLB.
         */
        /* XXX add more safety checks */
        slbmfev %r15, %r14
        slbmfee %r16, %r14
        or      %r16, %r16, %r14        /* index is 0-15 */
        ori     %r16, %r16, 0x10        /* add 16 to index. */
        or      %r16, %r16, %r18        /* SLBE DMAP offset */
        rldicr  %r17, %r16, 0, 37       /* Invalidation SLBE */

        isync
        slbie   %r17
        /* isync */
        slbmte  %r15, %r16
        isync
        addi    %r14, %r14, 1
        cmpdi   %r14, 16
        blt     1b
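
        /*
         * Worked example for entry 0: slbmfee reads back the SLBE (ESID,
         * valid bit, index); or/ori rewrite the index as 0 + 0x10 = 16,
         * the or with %r18 moves the ESID into the DMAP range, and
         * rldicr ...,0,37 clears the low-order index bits to produce the
         * ESID form that slbie expects before slbmte reinstalls the
         * modified entry.
         */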

        /*
         * Now that we are set up with a temporary direct map, we can
         * continue with __restartkernel. Translation will be switched
         * back on at the rfid, at which point we will be executing from
         * the temporary direct map we just installed, until the kernel
         * takes over responsibility for the MMU.
         */
        bl      __restartkernel
        nop
ASEND(__restartkernel_virtual)

ASENTRY_NOPROF(__restartkernel)
        /*
         * r3-r7: arguments to go to __start
         * r8: offset from current kernel address to apply
         * r9: MSR to set when (atomically) jumping to __start + r8
         */
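
        /*
         * rfid is what makes the jump atomic: it loads the PC from SRR0
         * and the MSR from SRR1 in a single operation, so the MSR in r9
         * (e.g. with translation disabled) takes effect exactly at the
         * branch.  SRR0 is built from the current address plus the
         * relocation offset in r8 plus the distance to label 2.
         */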
        mtsrr1  %r9
        bl      1f
1:      mflr    %r25
        add     %r25,%r8,%r25
        addi    %r25,%r25,2f-1b
        mtsrr0  %r25
        rfid
2:      bl      __start
        nop
ASEND(__restartkernel)

#include <powerpc/aim/trap_subr64.S>
