The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/cpufunc_asm_armv5_ec.S

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $NetBSD: cpufunc_asm_armv5_ec.S,v 1.1 2007/01/06 00:50:54 christos Exp $        */
    2 
    3 /*
    4  * Copyright (c) 2002, 2005 ARM Limited
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  * 3. The name of the company may not be used to endorse or promote
   16  *    products derived from this software without specific prior written
   17  *    permission.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
   20  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
   21  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   22  * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
   23  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   24  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   25  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   29  * SUCH DAMAGE.
   30  *
   31  * ARMv5 assembly functions for manipulating caches.
   32  * These routines can be used by any core that supports both the set/index
   33  * operations and the test and clean operations for efficiently cleaning the
   34  * entire DCache.  If a core does not have the test and clean operations, but
   35  * does have the set/index operations, use the routines in cpufunc_asm_armv5.S.
   36  * This source was derived from that file.
   37  */
   38 
   39 #include <machine/asm.h>
   40 __FBSDID("$FreeBSD: releng/10.0/sys/arm/arm/cpufunc_asm_armv5_ec.S 248361 2013-03-16 02:48:49Z andrew $");
   41 
   42 /*
   43  * Functions to set the MMU Translation Table Base register
   44  *
   45  * We need to clean and flush the cache as it uses virtual
   46  * addresses that are about to change.
   47  */
    48 ENTRY(armv5_ec_setttb)
    49         /*
    50          * Some other ARM ports save registers on the stack, call the
    51          * idcache_wbinv_all function and then restore the registers from the
    52          * stack before setting the TTB.  I observed that this caused a
    53          * problem when the old and new translation table entries' buffering
    54          * bits were different.  If I saved the registers in other registers
    55          * or invalidated the caches when I returned from idcache_wbinv_all,
    56          * it worked fine.  If not, I ended up executing at an invalid PC.
    57          * For armv5_ec_setttb, the idcache_wbinv_all is simple enough, I just
    58          * do it directly and entirely avoid the problem.
    59          */
        /* In: r0 = new translation table base value for CP15 c2. */
    60         mcr     p15, 0, r0, c7, c5, 0   /* Invalidate ICache (register value ignored) */
    61 1:      mrc     p15, 0, r15, c7, c14, 3 /* Test, clean and invalidate DCache; Rd=pc sets flags */
    62         bne     1b                      /* More to do?  Loop until DCache reports clean */
    63         mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
    64 
    65         mcr     p15, 0, r0, c2, c0, 0   /* load new TTB */
    66 
    67         mcr     p15, 0, r0, c8, c7, 0   /* invalidate I+D TLBs (register value ignored) */
    68         RET
    69 END(armv5_ec_setttb)
   70 
   71 /*
   72  * Cache operations.  For the entire cache we use the enhanced cache
   73  * operations.
   74  */
   75 
    76 ENTRY_NP(armv5_ec_icache_sync_range)
        /* In: r0 = start VA, r1 = length in bytes. */
    77         ldr     ip, .Larmv5_ec_line_size /* ip = &arm_pdcache_line_size */
    78         cmp     r1, #0x4000             /* range 16KB or larger? */
    79         bcs     .Larmv5_ec_icache_sync_all /* then sync the whole cache instead */
    80         ldr     ip, [ip]                /* ip = D-cache line size in bytes */
    81         sub     r1, r1, #1              /* Don't overrun */
    82         sub     r3, ip, #1              /* r3 = intra-line offset mask */
    83         and     r2, r0, r3              /* r2 = start VA's offset within its line */
    84         add     r1, r1, r2              /* extend count to cover the partial first line */
    85         bic     r0, r0, r3              /* align start VA down to a line boundary */
    86 1:
    87         mcr     p15, 0, r0, c7, c5, 1   /* Invalidate I cache SE with VA */
    88         mcr     p15, 0, r0, c7, c10, 1  /* Clean D cache SE with VA */
    89         add     r0, r0, ip              /* advance one cache line */
    90         subs    r1, r1, ip              /* decrement remaining byte count */
    91         bpl     1b                      /* loop while count is still >= 0 */
    92         mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
    93         RET
    94 END(armv5_ec_icache_sync_range)
   95 
    96 ENTRY_NP(armv5_ec_icache_sync_all)
    97 .Larmv5_ec_icache_sync_all:
    98         /*
    99          * We assume that the code here can never be out of sync with the
   100          * dcache, so that we can safely flush the Icache and fall through
   101          * into the Dcache cleaning code.
   102          */
   103         mcr     p15, 0, r0, c7, c5, 0   /* Flush I cache (register value ignored) */
   104         /* Fall through to clean Dcache. */
   105 
        /* Also the large-range target of armv5_ec_dcache_wb_range below. */
   106 .Larmv5_ec_dcache_wb:
   107 1:
   108         mrc     p15, 0, r15, c7, c10, 3 /* Test and clean (don't invalidate); Rd=pc sets flags */
   109         bne     1b                      /* More to do?  Loop until DCache reports clean */
   110         mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
   111         RET
   112 END(armv5_ec_icache_sync_all)
  113 
        /* Literal pool: address of the kernel's D-cache line size variable. */
   114 .Larmv5_ec_line_size:
   115         .word   _C_LABEL(arm_pdcache_line_size)
  116 
   117 ENTRY(armv5_ec_dcache_wb_range)
        /* In: r0 = start VA, r1 = length in bytes. */
   118         ldr     ip, .Larmv5_ec_line_size /* ip = &arm_pdcache_line_size */
   119         cmp     r1, #0x4000             /* range 16KB or larger? */
   120         bcs     .Larmv5_ec_dcache_wb    /* then clean the whole DCache instead */
   121         ldr     ip, [ip]                /* ip = D-cache line size in bytes */
   122         sub     r1, r1, #1              /* Don't overrun */
   123         sub     r3, ip, #1              /* r3 = intra-line offset mask */
   124         and     r2, r0, r3              /* r2 = start VA's offset within its line */
   125         add     r1, r1, r2              /* extend count to cover the partial first line */
   126         bic     r0, r0, r3              /* align start VA down to a line boundary */
   127 1:
   128         mcr     p15, 0, r0, c7, c10, 1  /* Clean D cache SE with VA */
   129         add     r0, r0, ip              /* advance one cache line */
   130         subs    r1, r1, ip              /* decrement remaining byte count */
   131         bpl     1b                      /* loop while count is still >= 0 */
   132         mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
   133         RET
   134 END(armv5_ec_dcache_wb_range)
  135 
   136 ENTRY(armv5_ec_dcache_wbinv_range)
        /* In: r0 = start VA, r1 = length in bytes. */
   137         ldr     ip, .Larmv5_ec_line_size /* ip = &arm_pdcache_line_size */
   138         cmp     r1, #0x4000             /* range 16KB or larger? */
   139         bcs     .Larmv5_ec_dcache_wbinv_all /* then wb-inv the whole DCache instead */
   140         ldr     ip, [ip]                /* ip = D-cache line size in bytes */
   141         sub     r1, r1, #1              /* Don't overrun */
   142         sub     r3, ip, #1              /* r3 = intra-line offset mask */
   143         and     r2, r0, r3              /* r2 = start VA's offset within its line */
   144         add     r1, r1, r2              /* extend count to cover the partial first line */
   145         bic     r0, r0, r3              /* align start VA down to a line boundary */
   146 1:
   147         mcr     p15, 0, r0, c7, c14, 1  /* Purge (clean+invalidate) D cache SE with VA */
   148         add     r0, r0, ip              /* advance one cache line */
   149         subs    r1, r1, ip              /* decrement remaining byte count */
   150         bpl     1b                      /* loop while count is still >= 0 */
   151         mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
   152         RET
   153 END(armv5_ec_dcache_wbinv_range)
  154 
   155 /*
   156  * Note, we must not invalidate everything.  If the range is too big we
   157  * must use wb-inv of the entire cache (a blanket invalidate would discard dirty lines outside the requested range).
   158  */
   159 ENTRY(armv5_ec_dcache_inv_range)
        /* In: r0 = start VA, r1 = length in bytes. */
   160         ldr     ip, .Larmv5_ec_line_size /* ip = &arm_pdcache_line_size */
   161         cmp     r1, #0x4000             /* range 16KB or larger? */
   162         bcs     .Larmv5_ec_dcache_wbinv_all /* then wb-inv (not inv!) the whole DCache */
   163         ldr     ip, [ip]                /* ip = D-cache line size in bytes */
   164         sub     r1, r1, #1              /* Don't overrun */
   165         sub     r3, ip, #1              /* r3 = intra-line offset mask */
   166         and     r2, r0, r3              /* r2 = start VA's offset within its line */
   167         add     r1, r1, r2              /* extend count to cover the partial first line */
   168         bic     r0, r0, r3              /* align start VA down to a line boundary */
   169 1:
   170         mcr     p15, 0, r0, c7, c6, 1   /* Invalidate D cache SE with VA */
   171         add     r0, r0, ip              /* advance one cache line */
   172         subs    r1, r1, ip              /* decrement remaining byte count */
   173         bpl     1b                      /* loop while count is still >= 0 */
   174         mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
   175         RET
   176 END(armv5_ec_dcache_inv_range)
  177 
   178 ENTRY(armv5_ec_idcache_wbinv_range)
        /* In: r0 = start VA, r1 = length in bytes. */
   179         ldr     ip, .Larmv5_ec_line_size /* ip = &arm_pdcache_line_size */
   180         cmp     r1, #0x4000             /* range 16KB or larger? */
   181         bcs     .Larmv5_ec_idcache_wbinv_all /* then wb-inv both caches entirely */
   182         ldr     ip, [ip]                /* ip = D-cache line size in bytes */
   183         sub     r1, r1, #1              /* Don't overrun */
   184         sub     r3, ip, #1              /* r3 = intra-line offset mask */
   185         and     r2, r0, r3              /* r2 = start VA's offset within its line */
   186         add     r1, r1, r2              /* extend count to cover the partial first line */
   187         bic     r0, r0, r3              /* align start VA down to a line boundary */
   188 1:
   189         mcr     p15, 0, r0, c7, c5, 1   /* Invalidate I cache SE with VA */
   190         mcr     p15, 0, r0, c7, c14, 1  /* Purge (clean+invalidate) D cache SE with VA */
   191         add     r0, r0, ip              /* advance one cache line */
   192         subs    r1, r1, ip              /* decrement remaining byte count */
   193         bpl     1b                      /* loop while count is still >= 0 */
   194         mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
   195         RET
   196 END(armv5_ec_idcache_wbinv_range)
  197 
   198 ENTRY_NP(armv5_ec_idcache_wbinv_all)
   199 .Larmv5_ec_idcache_wbinv_all:
   200         /*
   201          * We assume that the code here can never be out of sync with the
   202          * dcache, so that we can safely flush the Icache and fall through
   203          * into the Dcache purging code.
   204          */
   205         mcr     p15, 0, r0, c7, c5, 0   /* Invalidate ICache (register value ignored) */
   206         /* Fall through to purge Dcache. */
        /*
         * NOTE: there is no RET here; execution falls off the end of this
         * function into armv5_ec_dcache_wbinv_all, which must therefore
         * remain immediately after it in the output.
         */
   207 END(armv5_ec_idcache_wbinv_all)
  208 
   209 ENTRY(armv5_ec_dcache_wbinv_all)
        /* Also reached by fall-through from armv5_ec_idcache_wbinv_all above. */
   210 .Larmv5_ec_dcache_wbinv_all:
   211 1:      mrc     p15, 0, r15, c7, c14, 3 /* Test, clean and invalidate DCache; Rd=pc sets flags */
   212         bne     1b                      /* More to do?  Loop until DCache reports clean */
   213         mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
   214         RET
   215 END(armv5_ec_dcache_wbinv_all)
  216 

Cache object: 7e5414569981c7cd5ab1dd3ed465748a


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.