FreeBSD/Linux Kernel Cross Reference
sys/powerpc/booke/locore.S


    1 /*-
    2  * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
    3  * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
   18  * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   19  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
   20  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   21  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   22  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   23  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   24  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   25  *
   26  * $FreeBSD$
   27  */
   28 
   29 #include "assym.inc"
   30 
   31 #include "opt_hwpmc_hooks.h"
   32 
   33 #include <machine/asm.h>
   34 #include <machine/hid.h>
   35 #include <machine/param.h>
   36 #include <machine/spr.h>
   37 #include <machine/pte.h>
   38 #include <machine/trap.h>
   39 #include <machine/vmparam.h>
   40 #include <machine/tlb.h>
   41 
   42 #ifdef _CALL_ELF
   43 .abiversion _CALL_ELF
   44 #endif
   45 
   46 #define TMPSTACKSZ      16384
   47 
   48 #ifdef __powerpc64__
   49 #define GET_TOCBASE(r)  \
   50         mfspr   r, SPR_SPRG8
   51 #define TOC_RESTORE     nop
   52 #define CMPI    cmpdi
   53 #define CMPL    cmpld
   54 #define LOAD    ld
   55 #define LOADX   ldarx
   56 #define STORE   std
   57 #define STOREX  stdcx.
   58 #define STU     stdu
   59 #define CALLSIZE        48
   60 #define REDZONE         288
   61 #define THREAD_REG      %r13
   62 #define ADDR(x) \
   63         .llong  x
   64 #define WORD_SIZE       8
   65 #else
   66 #define GET_TOCBASE(r)
   67 #define TOC_RESTORE
   68 #define CMPI    cmpwi
   69 #define CMPL    cmplw
   70 #define LOAD    lwz
   71 #define LOADX   lwarx
   72 #define STOREX  stwcx.
   73 #define STORE   stw
   74 #define STU     stwu
   75 #define CALLSIZE        8
   76 #define REDZONE         0
   77 #define THREAD_REG      %r2
   78 #define ADDR(x) \
   79         .long   x
   80 #define WORD_SIZE       4
   81 #endif
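      /*
       * The macros above let the same assembly source serve both 32-bit and
       * 64-bit kernels: LOAD/STORE/LOADX/STOREX and ADDR pick the native
       * word width, CALLSIZE and REDZONE reflect the stack-frame conventions
       * of the respective ABI, and THREAD_REG names the register reserved
       * for the current-thread pointer.
       */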
   82 
   83 #ifdef __powerpc64__
   84         /* Placate lld by creating a kboot stub. */
   85         .section ".text.kboot", "x", @progbits
   86         b __start
   87 #endif
   88 
   89         .text
   90         .globl  btext
   91 btext:
   92 
   93 /*
   94  * This symbol is here for the benefit of kvm_mkdb, and is supposed to
   95  * mark the start of kernel text.
   96  */
   97         .globl  kernel_text
   98 kernel_text:
   99 
  100 /*
  101  * Startup entry.  Note, this must be the first thing in the text segment!
  102  */
  103         .text
  104         .globl  __start
  105 __start:
  106 
  107 /*
  108  * Assumptions on the boot loader:
  109  *  - System memory starts from physical address 0
  110  *  - It's mapped by a single TLB1 entry
  111  *  - TLB1 mapping is 1:1 pa to va
  112  *  - Kernel is loaded at a 64MB boundary
  113  *  - All PID registers are set to the same value
  114  *  - CPU is running in AS=0
  115  *
  116  * Register contents provided by the loader(8):
  117  *      r1      : stack pointer
  118  *      r3      : metadata pointer
  119  *
  120  * We rearrange the TLB1 layout as follows:
  121  *  - Find the TLB1 entry we started in
  122  *  - Make sure it's protected, invalidate other entries
  123  *  - Create a temp entry in the second AS (make sure it's not TLB1[0])
  124  *  - Switch to the temp mapping
  125  *  - Map 64MB of RAM in TLB1[0]
  126  *  - Use AS=0, set EPN to VM_MIN_KERNEL_ADDRESS and RPN to the kernel load address
  127  *  - Switch to the TLB1[0] mapping
  128  *  - Invalidate the temp mapping
  129  *
  130  * locore register usage:
  131  *      r1      : stack pointer
  132  *      r2      : trace pointer (AP only, for early diagnostics)
  133  *      r3-r27  : scratch registers
  134  *      r28     : temp TLB1 entry
  135  *      r29     : initial TLB1 entry we started in
  136  *      r30-r31 : arguments (metadata pointer)
  137  */
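      /*
       * All of the TLB1 manipulation below is driven through the e500 MAS
       * registers.  A rough, illustrative sketch of one TLB1 entry write
       * (field names follow the e500 core documentation, not literal kernel
       * macros):
       *
       *	MAS0 = TLBSEL(1) | ESEL(slot)        - array and slot to write
       *	MAS1 = VALID | IPROT | TSIZE | TS    - valid, protected, size, AS
       *	MAS2 = EPN | WIMGE                   - virtual page and attributes
       *	MAS3 = RPN_LO | SX | SW | SR         - physical page, permissions
       *	MAS7 = RPN_HI                        - physical bits above 32
       *	tlbwe; isync; msync                  - commit the entry
       */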
  138 
  139 /*
  140  * Keep arguments in r30 & r31 for later use.
  141  */
  142         mr      %r30, %r3
  143         mr      %r31, %r4
  144 
  145 /*
  146  * Initial cleanup
  147  */
  148         li      %r3, PSL_DE     /* Keep debug exceptions for CodeWarrior. */
  149 #ifdef __powerpc64__
  150         oris    %r3, %r3, PSL_CM@h
  151 #endif
  152         mtmsr   %r3
  153         isync
  154 
  155 /*
  156  * Initial HIDs configuration
  157  */
  158 1:
  159         mfpvr   %r3
  160         rlwinm  %r3, %r3, 16, 16, 31
  161 
  162         lis     %r4, HID0_E500_DEFAULT_SET@h
  163         ori     %r4, %r4, HID0_E500_DEFAULT_SET@l
  164 
  165         /* Check for e500mc and e5500 */
  166         cmpli   0, 0, %r3, FSL_E500mc
  167         bne     2f
  168 
  169         lis     %r4, HID0_E500MC_DEFAULT_SET@h
  170         ori     %r4, %r4, HID0_E500MC_DEFAULT_SET@l
  171         b       3f
  172 2:
  173         cmpli   0, 0, %r3, FSL_E5500
  174         bne     3f
  175 
  176         lis     %r4, HID0_E5500_DEFAULT_SET@h
  177         ori     %r4, %r4, HID0_E5500_DEFAULT_SET@l
  178 
  179 3:
  180         mtspr   SPR_HID0, %r4
  181         isync
  182 
  183 /*
  184  * E500mc, E5500 and E6500 do not have the HID1 register, so skip HID1
  185  * setup on these cores.
  186  */
  187         cmpli   0, 0, %r3, FSL_E500mc
  188         beq     1f
  189         cmpli   0, 0, %r3, FSL_E5500
  190         beq     1f
  191         cmpli   0, 0, %r3, FSL_E6500
  192         beq     1f
  193 
  194         lis     %r3, HID1_E500_DEFAULT_SET@h
  195         ori     %r3, %r3, HID1_E500_DEFAULT_SET@l
  196         mtspr   SPR_HID1, %r3
  197         isync
  198 1:
  199         /* Invalidate all entries in TLB0 */
  200         li      %r3, 0
  201         bl      tlb_inval_all
  202 
  203         cmpwi   %r30, 0
  204         beq     done_mapping
  205 
  206 /*
  207  * Locate the TLB1 entry that maps this code
  208  */
  209         bl      1f
  210 1:      mflr    %r3
  211         bl      tlb1_find_current       /* the entry found is returned in r29 */
  212 
  213         bl      tlb1_inval_all_but_current
  214 
  215 /*
  216  * Create temporary mapping in AS=1 and switch to it
  217  */
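      /*
       * On Book-E, rfi loads the MSR from SRR1 and branches to the address
       * in SRR0, so the sequence below stages the continuation address in
       * SRR0 and an MSR with PSL_IS|PSL_DS set in SRR1; the rfi then lands
       * us in address space 1, where the temporary mapping lives.
       */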
  218         bl      tlb1_temp_mapping_as1
  219 
  220         mfmsr   %r3
  221         ori     %r3, %r3, (PSL_IS | PSL_DS)
  222         bl      2f
  223 2:      mflr    %r4
  224         addi    %r4, %r4, (3f - 2b)
  225         mtspr   SPR_SRR0, %r4
  226         mtspr   SPR_SRR1, %r3
  227         rfi                             /* Switch context */
  228 
  229 /*
  230  * Invalidate initial entry
  231  */
  232 3:
  233         mr      %r3, %r29
  234         bl      tlb1_inval_entry
  235 
  236 /*
  237  * Set up the final mapping in TLB1[0] and switch to it
  238  */
  239         /* Final kernel mapping, map in 64 MB of RAM */
  240         lis     %r3, MAS0_TLBSEL1@h     /* Select TLB1 */
  241         li      %r4, 0                  /* Entry 0 */
  242         rlwimi  %r3, %r4, 16, 10, 15
  243         mtspr   SPR_MAS0, %r3
  244         isync
  245 
  246         li      %r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
  247         oris    %r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
  248         mtspr   SPR_MAS1, %r3           /* note TS was not filled, so it's TS=0 */
  249         isync
  250 
  251         LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
  252         ori     %r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
  253         mtspr   SPR_MAS2, %r3
  254         isync
  255 
  256         /* Discover phys load address */
  257         bl      3f
  258 3:      mflr    %r4                     /* Use current address */
  259         rlwinm  %r4, %r4, 0, 0, 5       /* 64MB alignment mask */
  260         ori     %r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
  261         mtspr   SPR_MAS3, %r4           /* Set RPN and protection */
  262         isync
  263         li      %r4, 0
  264         mtspr   SPR_MAS7, %r4
  265         isync
  266         tlbwe
  267         isync
  268         msync
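              /*
               * TLB1[0] now holds a valid, IPROT-protected, 64MB, AS=0 entry
               * with EPN = VM_MIN_KERNEL_ADDRESS and RPN = the 64MB-aligned
               * physical load address: the kernel's final mapping.
               */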
  269 
  270         /* Switch to the above TLB1[0] mapping */
  271         bl      4f
  272 4:      mflr    %r4
  273 #ifdef __powerpc64__
  274         clrldi  %r4, %r4, 38
  275         clrrdi  %r3, %r3, 12
  276 #else
  277         rlwinm  %r4, %r4, 0, 6, 31      /* Current offset from kernel load address */
  278         rlwinm  %r3, %r3, 0, 0, 19
  279 #endif
  280         add     %r4, %r4, %r3           /* Convert to kernel virtual address */
  281         addi    %r4, %r4, (5f - 4b)
  282         li      %r3, PSL_DE             /* Note AS=0 */
  283 #ifdef __powerpc64__
  284         oris    %r3, %r3, PSL_CM@h
  285 #endif
  286         mtspr   SPR_SRR0, %r4
  287         mtspr   SPR_SRR1, %r3
  288         rfi
  289 
  290 /*
  291  * Invalidate temp mapping
  292  */
  293 5:
  294         mr      %r3, %r28
  295         bl      tlb1_inval_entry
  296 
  297 done_mapping:
  298 
  299 #ifdef __powerpc64__
  300         /* Set up the TOC pointer */
  301         b       0f
  302         .align 3
  303 0:      nop
  304         bl      1f
  305         .llong  __tocbase + 0x8000 - .
  306 1:      mflr    %r2
  307         ld      %r1,0(%r2)
  308         add     %r2,%r1,%r2
  309         mtspr   SPR_SPRG8, %r2
  310         nop
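              /*
               * The bl/mflr pair above recovers the run-time address of the
               * .llong, which stores the link-time offset of
               * (__tocbase + 0x8000) from itself; adding the two yields the
               * run-time TOC pointer in r2 (biased by 0x8000 so signed 16-bit
               * offsets span the 64K TOC).  It is stashed in SPRG8 for the
               * GET_TOCBASE macro used after calls into C code.
               */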
  311 
  312         /* Get load offset */
  313         ld      %r31,-0x8000(%r2) /* First TOC entry is TOC base */
  314         subf    %r31,%r31,%r2   /* Subtract to get the load offset (relocbase) */
  315 
  316         /* Set up the stack pointer */
  317         bl      1f
  318         .llong  tmpstack + TMPSTACKSZ - 96 - .
  319 1:      mflr    %r3
  320         ld      %r1,0(%r3)
  321         add     %r1,%r1,%r3
  322 /*
  323  * Relocate kernel
  324  */
  325         bl      1f
  326         .llong _DYNAMIC-.
  327 1:      mflr    %r3
  328         ld      %r4,0(%r3)
  329         add     %r3,%r4,%r3
  330         mr      %r4,%r31
  331 #else
  332 /*
  333  * Setup a temporary stack
  334  */
  335         bl      1f
  336         .long tmpstack-.
  337 1:      mflr    %r1
  338         lwz     %r2,0(%r1)
  339         add     %r1,%r1,%r2
  340         addi    %r1, %r1, (TMPSTACKSZ - 16)
  341 
  342 /*
  343  * Relocate kernel
  344  */
  345         bl      1f
  346         .long   _DYNAMIC-.
  347         .long   _GLOBAL_OFFSET_TABLE_-.
  348 1:      mflr    %r5
  349         lwz     %r3,0(%r5)      /* _DYNAMIC in %r3 */
  350         add     %r3,%r3,%r5
  351         lwz     %r4,4(%r5)      /* GOT pointer */
  352         add     %r4,%r4,%r5
  353         lwz     %r4,4(%r4)      /* got[0] is _DYNAMIC link addr */
  354         subf    %r4,%r4,%r3     /* subtract to calculate relocbase */
  355 #endif
  356         bl      CNAME(elf_reloc_self)
  357         TOC_RESTORE
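      /*
       * elf_reloc_self() is entered with r3 = the run-time address of
       * _DYNAMIC and r4 = the load offset (relocation base) computed above;
       * it applies the kernel's own relative relocations so that absolute
       * pointers are valid at the address we were actually loaded at.
       */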
  358 
  359 /*
  360  * Initialise exception vector offsets
  361  */
  362         bl      CNAME(ivor_setup)
  363         TOC_RESTORE
  364 
  365 /*
  366  * Set up arguments and jump to system initialization code
  367  */
  368         mr      %r3, %r30
  369         mr      %r4, %r31
  370 
  371         /* Prepare core */
  372         bl      CNAME(booke_init)
  373         TOC_RESTORE
  374 
  375         /* Switch to thread0.td_kstack now */
  376         mr      %r1, %r3
  377         li      %r3, 0
  378         STORE   %r3, 0(%r1)
  379 
  380         /* Machine independent part, does not return */
  381         bl      CNAME(mi_startup)
  382         TOC_RESTORE
  383         /* NOT REACHED */
  384 5:      b       5b
  385 
  386 
  387 #ifdef SMP
  388 /************************************************************************/
  389 /* AP Boot page */
  390 /************************************************************************/
  391         .text
  392         .globl  __boot_page
  393         .align  12
  394 __boot_page:
  395         /*
  396          * The boot page is a special page of memory used during AP bringup.
  397          * Before the AP comes out of reset, the physical 4K page holding this
  398          * code is arranged to be mapped at 0xfffff000 by use of
  399          * platform-dependent registers.
  400          *
  401          * Alternatively, this page may be executed using an ePAPR-standardized
  402          * method -- writing to the address specified in "cpu-release-addr".
  403          *
  404          * In either case, execution begins at the last instruction of the
  405          * page, which is a branch back to the start of the page.
  406          *
  407          * The code in the page must do initial MMU setup and normalize the
  408          * TLBs for regular operation in the correct address space before
  409          * reading outside the page.
  410          *
  411          * This implementation accomplishes this by:
  412          * 1) Wiping TLB0 and all TLB1 entries but the one currently in use.
  413          * 2) Establishing a temporary 4K TLB1 mapping in AS=1, and switching
  414          *    to it with rfi. This entry must NOT be in TLB1 slot 0.
  415          *    (This is needed to give the code freedom to clean up AS=0.)
  416          * 3) Removing the initial TLB1 entry, leaving us with a single valid
  417          *    TLB1 entry, NOT in slot 0.
  418          * 4) Installing an AS0 entry in TLB1 slot 0 mapping the 64MB kernel
  419          *    segment at its final virtual address. A second rfi is done to
  420          *    switch to the final address space. At this point we can finally
  421          *    access the rest of the kernel segment safely.
  422          * 5) The temporary TLB1 AS=1 entry is removed, finally leaving us in
  423          *    a consistent (but minimal) state.
  424          * 6) Set up TOC, stack, and pcpu registers.
  425          * 7) Now that we can finally call C code, call pmap_bootstrap_ap(),
  426          *    which finishes copying in the shared TLB1 entries.
  427          *
  428          * At this point, the MMU is fully set up, and we can proceed with
  429          * running the actual AP bootstrap code.
  430          *
  431          * Pieces of this code are also used for the UP kernel, but in this case
  432          * the sections specific to boot page functionality are dropped by
  433          * the preprocessor.
  434          */
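              /*
               * Layout of the 4K page that follows: a branch to the setup
               * code, the bp_trace/bp_kernload/bp_virtaddr data words
               * (filled in before the AP is released), the AP setup code
               * itself, padding, and a final "b __boot_page" in the very
               * last word, which the AP fetches at 0xfffffffc on release
               * from holdoff.
               */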
  435 #ifdef __powerpc64__
  436         nop                     /* PPC64 alignment word. 64-bit target. */
  437 #endif
  438         bl      1f              /* 32-bit target. */
  439 
  440         .globl  bp_trace
  441 bp_trace:
  442         ADDR(0)                 /* Trace pointer (%r31). */
  443 
  444         .globl  bp_kernload
  445 bp_kernload:
  446         .llong 0                /* Kern phys. load address. */
  447 
  448         .globl  bp_virtaddr
  449 bp_virtaddr:
  450         ADDR(0)                 /* Virt. address of __boot_page. */
  451 
  452 /*
  453  * Initial configuration
  454  */
  455 1:
  456         mflr    %r31            /* r31 holds the address of bp_trace */
  457 
  458         /* Set HIDs */
  459         mfpvr   %r3
  460         rlwinm  %r3, %r3, 16, 16, 31
  461 
  462         /* HID0 for E500 is default */
  463         lis     %r4, HID0_E500_DEFAULT_SET@h
  464         ori     %r4, %r4, HID0_E500_DEFAULT_SET@l
  465 
  466         cmpli   0, 0, %r3, FSL_E500mc
  467         bne     2f
  468         lis     %r4, HID0_E500MC_DEFAULT_SET@h
  469         ori     %r4, %r4, HID0_E500MC_DEFAULT_SET@l
  470         b       3f
  471 2:
  472         cmpli   0, 0, %r3, FSL_E5500
  473         bne     3f
  474         lis     %r4, HID0_E5500_DEFAULT_SET@h
  475         ori     %r4, %r4, HID0_E5500_DEFAULT_SET@l
  476 3:
  477         mtspr   SPR_HID0, %r4
  478         isync
  479 
  480         /* Enable branch prediction */
  481         li      %r3, BUCSR_BPEN
  482         mtspr   SPR_BUCSR, %r3
  483         isync
  484 
  485         /* Invalidate all entries in TLB0 */
  486         li      %r3, 0
  487         bl      tlb_inval_all
  488 
  489 /*
  490  * Find TLB1 entry which is translating us now
  491  */
  492         bl      2f
  493 2:      mflr    %r3
  494         bl      tlb1_find_current       /* the entry number found is in r29 */
  495 
  496         bl      tlb1_inval_all_but_current
  497 
  498 /*
  499  * Create temporary translation in AS=1 and switch to it
  500  */
  501 
  502         bl      tlb1_temp_mapping_as1
  503 
  504         mfmsr   %r3
  505         ori     %r3, %r3, (PSL_IS | PSL_DS)
  506 #ifdef __powerpc64__
  507         oris    %r3, %r3, PSL_CM@h      /* Ensure we're in 64-bit after RFI */
  508 #endif
  509         bl      3f
  510 3:      mflr    %r4
  511         addi    %r4, %r4, (4f - 3b)
  512         mtspr   SPR_SRR0, %r4
  513         mtspr   SPR_SRR1, %r3
  514         rfi                             /* Switch context */
  515 
  516 /*
  517  * Invalidate initial entry
  518  */
  519 4:
  520         mr      %r3, %r29
  521         bl      tlb1_inval_entry
  522 
  523 /*
  524  * Setup final mapping in TLB1[0] and switch to it
  525  */
  526         /* Final kernel mapping, map in 64 MB of RAM */
  527         lis     %r3, MAS0_TLBSEL1@h     /* Select TLB1 */
  528         li      %r4, 0                  /* Entry 0 */
  529         rlwimi  %r3, %r4, 16, 4, 15
  530         mtspr   SPR_MAS0, %r3
  531         isync
  532 
  533         li      %r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
  534         oris    %r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
  535         mtspr   SPR_MAS1, %r3           /* note TS was not filled, so it's TS=0 */
  536         isync
  537 
  538         LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
  539         ori     %r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
  540         mtspr   SPR_MAS2, %r3
  541         isync
  542 
  543         /* Retrieve kernel load [physical] address from bp_kernload */
  544 5:
  545         mflr    %r3
  546 #ifdef __powerpc64__
  547         clrrdi  %r3, %r3, PAGE_SHIFT    /* trunc_page(%r3) */
  548 #else
  549         clrrwi  %r3, %r3, PAGE_SHIFT    /* trunc_page(%r3) */
  550 #endif
  551         /* Load lower half of the kernel loadaddr. */
  552         lwz     %r4, (bp_kernload - __boot_page + 4)(%r3)
  553         LOAD    %r5, (bp_virtaddr - __boot_page)(%r3)
  554 
  555         /* Set RPN and protection */
  556         ori     %r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
  557         mtspr   SPR_MAS3, %r4
  558         isync
  559         lwz     %r4, (bp_kernload - __boot_page)(%r3)
  560         mtspr   SPR_MAS7, %r4
  561         isync
  562         tlbwe
  563         isync
  564         msync
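              /*
               * bp_kernload is a 64-bit big-endian value: the word at offset
               * 4 (low 32 bits of the physical load address) went into the
               * MAS3 RPN field above, and the word at offset 0 (the upper
               * bits) into MAS7, so the kernel may be loaded above 4GB
               * physical.
               */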
  565 
  566         /* Switch to the final mapping */
  567         bl      6f
  568 6:      mflr    %r3
  569         rlwinm  %r3, %r3, 0, 0xfff      /* Offset from boot page start */
  570         add     %r3, %r3, %r5           /* Make this a virtual address */
  571         addi    %r3, %r3, (7f - 6b)     /* And figure out return address. */
  572 #ifdef __powerpc64__
  573         lis     %r4, PSL_CM@h           /* Note AS=0 */
  574 #else
  575         li      %r4, 0                  /* Note AS=0 */
  576 #endif
  577         mtspr   SPR_SRR0, %r3
  578         mtspr   SPR_SRR1, %r4
  579         rfi
  580 7:
  581 
  582 /*
  583  * At this point we're running at virtual addresses VM_MIN_KERNEL_ADDRESS and
  584  * beyond, so we can directly access all locations the kernel was linked
  585  * against.
  586  */
  587 
  588 /*
  589  * Invalidate temp mapping
  590  */
  591         mr      %r3, %r28
  592         bl      tlb1_inval_entry
  593 
  594 #ifdef __powerpc64__
  595         /* Set up the TOC pointer */
  596         b       0f
  597         .align 3
  598 0:      nop
  599         bl      1f
  600         .llong  __tocbase + 0x8000 - .
  601 1:      mflr    %r2
  602         ld      %r1,0(%r2)
  603         add     %r2,%r1,%r2
  604         mtspr   SPR_SPRG8, %r2
  605 
  606         /* Set up the stack pointer */
  607         addis   %r1,%r2,TOC_REF(tmpstack)@ha
  608         ld      %r1,TOC_REF(tmpstack)@l(%r1)
  609         addi    %r1,%r1,TMPSTACKSZ-96
  610 #else
  611 /*
  612  * Setup a temporary stack
  613  */
  614         bl      1f
  615         .long tmpstack-.
  616 1:      mflr    %r1
  617         lwz     %r2,0(%r1)
  618         add     %r1,%r1,%r2
  619         stw     %r1, 0(%r1)
  620         addi    %r1, %r1, (TMPSTACKSZ - 16)
  621 #endif
  622 
  623 /*
  624  * Initialise exception vector offsets
  625  */
  626         bl      CNAME(ivor_setup)
  627         TOC_RESTORE
  628 
  629         /*
  630          * Assign our pcpu instance
  631          */
  632         bl      1f
  633         .long ap_pcpu-.
  634 1:      mflr    %r4
  635         lwz     %r3, 0(%r4)
  636         add     %r3, %r3, %r4
  637         LOAD    %r3, 0(%r3)
  638         mtsprg0 %r3
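              /*
               * ap_pcpu points at this AP's struct pcpu, set up by the BSP
               * before releasing the AP; SPRG0 is where FreeBSD/powerpc keeps
               * the per-CPU pointer, so the PCPU/curthread accessors work
               * once we enter C code.
               */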
  639 
  640         bl      CNAME(pmap_bootstrap_ap)
  641         TOC_RESTORE
  642 
  643         bl      CNAME(cpudep_ap_bootstrap)
  644         TOC_RESTORE
  645         /* Switch to the idle thread's kstack */
  646         mr      %r1, %r3
  647         
  648         bl      CNAME(machdep_ap_bootstrap)
  649         TOC_RESTORE
  650 
  651         /* NOT REACHED */
  652 6:      b       6b
  653 #endif /* SMP */
  654 
  655 #if defined (BOOKE_E500)
  656 /*
  657  * Invalidate all entries in the given TLB.
  658  *
  659  * r3   TLBSEL
  660  */
  661 tlb_inval_all:
  662         rlwinm  %r3, %r3, 3, (1 << 3)   /* TLBSEL */
  663         ori     %r3, %r3, (1 << 2)      /* INVALL */
  664         tlbivax 0, %r3
  665         isync
  666         msync
  667 
  668         tlbsync
  669         msync
  670         blr
  671 
  672 /*
  673  * Expects the address to look up in r3, returns the entry number in r29.
  674  *
  675  * FIXME: the hidden assumption is that we are now running in AS=0, but we
  676  * should retrieve the actual AS from MSR[IS|DS] and put it in MAS6[SAS].
  677  */
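      /*
       * tlbsx searches the TLB arrays for the effective address in r3 using
       * the PID/AS staged in MAS6; on a hit the matching entry is loaded
       * into MAS0-MAS3, so MAS0[ESEL] below identifies the slot currently
       * translating this code.
       */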
  678 tlb1_find_current:
  679         mfspr   %r17, SPR_PID0
  680         slwi    %r17, %r17, MAS6_SPID0_SHIFT
  681         mtspr   SPR_MAS6, %r17
  682         isync
  683         tlbsx   0, %r3
  684         mfspr   %r17, SPR_MAS0
  685         rlwinm  %r29, %r17, 16, 26, 31          /* MAS0[ESEL] -> r29 */
  686 
  687         /* Make sure we have IPROT set on the entry */
  688         mfspr   %r17, SPR_MAS1
  689         oris    %r17, %r17, MAS1_IPROT@h
  690         mtspr   SPR_MAS1, %r17
  691         isync
  692         tlbwe
  693         isync
  694         msync
  695         blr
  696 
  697 /*
  698  * Invalidates a single entry in TLB1.
  699  *
  700  * r3           ESEL
  701  * r4-r5        scratched
  702  */
  703 tlb1_inval_entry:
  704         lis     %r4, MAS0_TLBSEL1@h     /* Select TLB1 */
  705         rlwimi  %r4, %r3, 16, 10, 15    /* Select our entry */
  706         mtspr   SPR_MAS0, %r4
  707         isync
  708         tlbre
  709         li      %r5, 0                  /* MAS1[V] = 0 */
  710         mtspr   SPR_MAS1, %r5
  711         isync
  712         tlbwe
  713         isync
  714         msync
  715         blr
  716 
  717 /*
  718  * r29          current entry number
  719  * r28          returned temp entry
  720  * r3-r5        scratched
  721  */
  722 tlb1_temp_mapping_as1:
  723         /* Read our current translation */
  724         lis     %r3, MAS0_TLBSEL1@h     /* Select TLB1 */
  725         rlwimi  %r3, %r29, 16, 10, 15   /* Select our current entry */
  726         mtspr   SPR_MAS0, %r3
  727         isync
  728         tlbre
  729 
  730         /*
  731          * Prepare and write temp entry
  732          *
  733          * FIXME: this is not robust against overflow, i.e. when the current
  734          * entry is the last one in TLB1.
  735          */
  736         lis     %r3, MAS0_TLBSEL1@h     /* Select TLB1 */
  737         addi    %r28, %r29, 1           /* Use next entry. */
  738         rlwimi  %r3, %r28, 16, 10, 15   /* Select temp entry */
  739         mtspr   SPR_MAS0, %r3
  740         isync
  741         mfspr   %r5, SPR_MAS1
  742         li      %r4, 1                  /* AS=1 */
  743         rlwimi  %r5, %r4, 12, 19, 19
  744         li      %r4, 0                  /* Global mapping, TID=0 */
  745         rlwimi  %r5, %r4, 16, 8, 15
  746         oris    %r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
  747         mtspr   SPR_MAS1, %r5
  748         isync
  749         mflr    %r3
  750         li      %r4, 0
  751         mtspr   SPR_MAS7, %r4
  752         mtlr    %r3
  753         isync
  754         tlbwe
  755         isync
  756         msync
  757         blr
  758 
  759 /*
  760  * Loops over TLB1, invalidating all entries except the one which currently
  761  * maps this code.
  762  *
  763  * r29          current entry
  764  * r3-r5        scratched
  765  */
  766 tlb1_inval_all_but_current:
  767         mfspr   %r3, SPR_TLB1CFG        /* Get number of entries */
  768         andi.   %r3, %r3, TLBCFG_NENTRY_MASK@l
  769         li      %r4, 0                  /* Start from Entry 0 */
  770 1:      lis     %r5, MAS0_TLBSEL1@h
  771         rlwimi  %r5, %r4, 16, 10, 15
  772         mtspr   SPR_MAS0, %r5
  773         isync
  774         tlbre
  775         mfspr   %r5, SPR_MAS1
  776         cmpw    %r4, %r29               /* our current entry? */
  777         beq     2f
  778         rlwinm  %r5, %r5, 0, 2, 31      /* clear VALID and IPROT bits */
  779         mtspr   SPR_MAS1, %r5
  780         isync
  781         tlbwe
  782         isync
  783         msync
  784 2:      addi    %r4, %r4, 1
  785         cmpw    %r4, %r3                /* Check if this is the last entry */
  786         bne     1b
  787         blr
  788 #endif
  789 
  790 #ifdef SMP
  791 .globl __boot_tlb1
  792         /*
  793          * The __boot_tlb1 table is used to hold BSP TLB1 entries
  794          * marked with the _TLB_ENTRY_SHARED flag during AP bootstrap.
  795          * The BSP fills in the table in tlb_ap_prep(); each AP then loads
  796          * its contents into the TLB1 hardware in pmap_bootstrap_ap().
  797          */
  798 __boot_tlb1:
  799         .space TLB1_MAX_ENTRIES * TLB_ENTRY_SIZE
  800 
  801 __boot_page_padding:
  802         /*
  803          * The boot page needs to be exactly 4K, with the last word of the page
  804          * acting as the reset vector, so we need to pad out the remainder.
  805          * Upon release from holdoff, the CPU fetches the last word of the boot
  806          * page.
  807          */
  808         .space  4092 - (__boot_page_padding - __boot_page)
  809         b       __boot_page
  810         /*
  811          * This is the end of the boot page.
  812          * During AP startup, the previous instruction is at 0xfffffffc
  813          * virtual (i.e. the reset vector.)
  814          */
  815 #endif /* SMP */
  816 
  817 /************************************************************************/
  818 /* locore subroutines */
  819 /************************************************************************/
  820 
  821 /*
  822  * Cache disable/enable/inval sequences according
  823  * to section 2.16 of the E500CORE RM.
  824  */
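      /*
       * Each routine below follows the same pattern: set the relevant enable
       * or flash-invalidate bits in L1CSR0/L1CSR1, synchronize around the
       * mtspr, and, for the invalidate routines, poll until the hardware
       * clears the flash-invalidate bit to signal completion.
       */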
  825 ENTRY(dcache_inval)
  826         /* Invalidate d-cache */
  827         mfspr   %r3, SPR_L1CSR0
  828         ori     %r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
  829         msync
  830         isync
  831         mtspr   SPR_L1CSR0, %r3
  832         isync
  833 1:      mfspr   %r3, SPR_L1CSR0
  834         andi.   %r3, %r3, L1CSR0_DCFI
  835         bne     1b
  836         blr
  837 END(dcache_inval)
  838 
  839 ENTRY(dcache_disable)
  840         /* Disable d-cache */
  841         mfspr   %r3, SPR_L1CSR0
  842         li      %r4, L1CSR0_DCE@l
  843         not     %r4, %r4
  844         and     %r3, %r3, %r4
  845         msync
  846         isync
  847         mtspr   SPR_L1CSR0, %r3
  848         isync
  849         blr
  850 END(dcache_disable)
  851 
  852 ENTRY(dcache_enable)
  853         /* Enable d-cache */
  854         mfspr   %r3, SPR_L1CSR0
  855         oris    %r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
  856         ori     %r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
  857         msync
  858         isync
  859         mtspr   SPR_L1CSR0, %r3
  860         isync
  861         blr
  862 END(dcache_enable)
  863 
  864 ENTRY(icache_inval)
  865         /* Invalidate i-cache */
  866         mfspr   %r3, SPR_L1CSR1
  867         ori     %r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
  868         isync
  869         mtspr   SPR_L1CSR1, %r3
  870         isync
  871 1:      mfspr   %r3, SPR_L1CSR1
  872         andi.   %r3, %r3, L1CSR1_ICFI
  873         bne     1b
  874         blr
  875 END(icache_inval)
  876 
  877 ENTRY(icache_disable)
  878         /* Disable i-cache */
  879         mfspr   %r3, SPR_L1CSR1
  880         li      %r4, L1CSR1_ICE@l
  881         not     %r4, %r4
  882         and     %r3, %r3, %r4
  883         isync
  884         mtspr   SPR_L1CSR1, %r3
  885         isync
  886         blr
  887 END(icache_disable)
  888 
  889 ENTRY(icache_enable)
  890         /* Enable i-cache */
  891         mfspr   %r3, SPR_L1CSR1
  892         oris    %r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
  893         ori     %r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
  894         isync
  895         mtspr   SPR_L1CSR1, %r3
  896         isync
  897         blr
  898 END(icache_enable)
  899 
  900 /*
  901  * L2 cache disable/enable/inval sequences for E500mc.
  902  */
  903 
  904 ENTRY(l2cache_inval)
  905         mfspr   %r3, SPR_L2CSR0
  906         oris    %r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h
  907         ori     %r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l
  908         isync
  909         mtspr   SPR_L2CSR0, %r3
  910         isync
  911 1:      mfspr   %r3, SPR_L2CSR0
  912         andis.  %r3, %r3, L2CSR0_L2FI@h
  913         bne     1b
  914         blr
  915 END(l2cache_inval)
  916 
  917 ENTRY(l2cache_enable)
  918         mfspr   %r3, SPR_L2CSR0
  919         oris    %r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h
  920         isync
  921         mtspr   SPR_L2CSR0, %r3
  922         isync
  923         blr
  924 END(l2cache_enable)
  925 
  926 /*
  927  * Branch predictor setup.
  928  */
  929 ENTRY(bpred_enable)
  930         mfspr   %r3, SPR_BUCSR
  931         ori     %r3, %r3, BUCSR_BBFI
  932         isync
  933         mtspr   SPR_BUCSR, %r3
  934         isync
  935         ori     %r3, %r3, BUCSR_BPEN
  936         isync
  937         mtspr   SPR_BUCSR, %r3
  938         isync
  939         blr
  940 END(bpred_enable)
  941 
  942 /*
  943  * XXX: This should be moved to a shared AIM/booke asm file, if one ever is
  944  * created.
  945  */
  946 ENTRY(get_spr)
  947         /* Note: The spr number is patched at runtime */
  948         mfspr   %r3, 0
  949         blr
  950 END(get_spr)
  951 
  952 /************************************************************************/
  953 /* Data section                                                         */
  954 /************************************************************************/
  955         .data
  956         .align 3
  957 GLOBAL(__startkernel)
  958         ADDR(begin)
  959 GLOBAL(__endkernel)
  960         ADDR(end)
  961         .align  4
  962 tmpstack:
  963         .space  TMPSTACKSZ
  964 tmpstackbound:
  965         .space 10240    /* XXX: this really should not be necessary */
  966 #ifdef __powerpc64__
  967 TOC_ENTRY(tmpstack)
  968 #ifdef SMP
  969 TOC_ENTRY(bp_kernload)
  970 #endif
  971 #endif
  972 
  973 /*
  974  * Compiled KERNBASE locations
  975  */
  976         .globl  kernbase
  977         .set    kernbase, KERNBASE
  978 
  979 #include <powerpc/booke/trap_subr.S>
