FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/cpufunc_asm_xscale_c3.S


/*      $NetBSD: cpufunc_asm_xscale.S,v 1.16 2002/08/17 16:36:32 thorpej Exp $  */

/*-
 * Copyright (c) 2007 Olivier Houchard
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Allen Briggs and Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*-
 * Copyright (c) 2001 Matt Thomas.
 * Copyright (c) 1997,1998 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * XScale core 3 assembly functions for CPU / MMU / TLB specific operations
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD: releng/10.0/sys/arm/arm/cpufunc_asm_xscale_c3.S 248361 2013-03-16 02:48:49Z andrew $");

/*
 * Size of the XScale core D-cache.
 */
#define DCACHE_SIZE             0x00008000

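
/*
 * Literal pool entry holding the address of the C variable
 * block_userspace_access; the XSCALE_CACHE_CLEAN_BLOCK/UNBLOCK macros
 * below load this word to locate and update that flag when
 * CACHE_CLEAN_BLOCK_INTR is not defined.
 */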
.Lblock_userspace_access:
        .word   _C_LABEL(block_userspace_access)

/*
 * CPWAIT -- Canonical method to wait for CP15 update.
 * From: Intel 80200 manual, section 2.3.3.
 *
 * NOTE: Clobbers the specified temp reg.
 */
#define CPWAIT_BRANCH                                                    \
        sub     pc, pc, #4

#define CPWAIT(tmp)                                                      \
        mrc     p15, 0, tmp, c2, c0, 0  /* arbitrary read of CP15 */    ;\
        mov     tmp, tmp                /* wait for it to complete */   ;\
        CPWAIT_BRANCH                   /* branch to next insn */

#define CPWAIT_AND_RETURN_SHIFTER       lsr #32

#define CPWAIT_AND_RETURN(tmp)                                           \
        mrc     p15, 0, tmp, c2, c0, 0  /* arbitrary read of CP15 */    ;\
        /* Wait for it to complete and branch to the return address */   \
        sub     pc, lr, tmp, CPWAIT_AND_RETURN_SHIFTER
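
/*
 * Notes on the CPWAIT idiom:
 *  - "sub pc, pc, #4": the pc reads as the address of the current
 *    instruction + 8, so subtracting 4 yields the address of the next
 *    instruction; writing it to pc is a branch-to-next-instruction that
 *    drains the pipeline after the CP15 access.
 *  - CPWAIT_AND_RETURN uses "lsr #32", which shifts the temporary register
 *    down to zero, so "sub pc, lr, tmp, lsr #32" is just a return to lr
 *    whose operand depends on the preceding mrc and therefore cannot
 *    complete before the CP15 read has.
 *
 * Illustrative use after a CP15 write (example only):
 *      mcr     p15, 0, r0, c2, c0, 0   /* e.g. write the TTB */
 *      CPWAIT(r0)                      /* ensure the update has taken effect */
 */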

#define ARM_USE_L2_CACHE

#define L2_CACHE_SIZE           0x80000
#define L2_CACHE_WAYS           8
#define L2_CACHE_LINE_SIZE      32
#define L2_CACHE_SETS           (L2_CACHE_SIZE / \
    (L2_CACHE_WAYS * L2_CACHE_LINE_SIZE))

#define L1_DCACHE_SIZE          32 * 1024
#define L1_DCACHE_WAYS          4
#define L1_DCACHE_LINE_SIZE     32
#define L1_DCACHE_SETS          (L1_DCACHE_SIZE / \
    (L1_DCACHE_WAYS * L1_DCACHE_LINE_SIZE))
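
/*
 * Derived geometry (for reference):
 *   L2_CACHE_SETS  = 0x80000 / (8 * 32) = 2048 sets, 8 ways, 32-byte lines
 *   L1_DCACHE_SETS = 32768   / (4 * 32) = 256 sets,  4 ways, 32-byte lines
 * The set/way clean loops below place the way index in the top bits of the
 * operand register (asl #30 for the 4-way L1, asl #29 for the 8-way L2) and
 * the set index at bit 5, i.e. in units of the 32-byte line size.
 */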
#ifdef CACHE_CLEAN_BLOCK_INTR
#define XSCALE_CACHE_CLEAN_BLOCK                                        \
        stmfd   sp!, {r4}                                       ;       \
        mrs     r4, cpsr_all                                    ;       \
        orr     r0, r4, #(I32_bit | F32_bit)                    ;       \
        msr     cpsr_all, r0

#define XSCALE_CACHE_CLEAN_UNBLOCK                                      \
        msr     cpsr_all, r4                                    ;       \
        ldmfd   sp!, {r4}
#else
#define XSCALE_CACHE_CLEAN_BLOCK                                        \
        stmfd   sp!, {r4}                                       ;       \
        ldr     r4, .Lblock_userspace_access                    ;       \
        ldr     ip, [r4]                                        ;       \
        orr     r0, ip, #1                                      ;       \
        str     r0, [r4]

#define XSCALE_CACHE_CLEAN_UNBLOCK                                      \
        str     ip, [r4]        /* r4 holds &block_userspace_access; r3 is clobbered by the clean loop */ ;\
        ldmfd   sp!, {r4}
#endif /* CACHE_CLEAN_BLOCK_INTR */
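
/*
 * Two ways to keep the D-cache stable while it is being cleaned: with
 * CACHE_CLEAN_BLOCK_INTR defined, IRQs and FIQs are masked in the CPSR for
 * the duration of the clean; otherwise the block_userspace_access flag is
 * set (a mechanism inherited from NetBSD, presumably so that code touching
 * user mappings holds off while the clean runs).  The UNBLOCK macro
 * restores the previous state in either case.
 */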
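
/*
 * Whole-cache clean / purge entry points.  All of these fall through to a
 * single set/way loop: the purgeID and syncI variants first invalidate the
 * whole I-cache, then every L1 D-cache line (4 ways x L1_DCACHE_SETS sets)
 * is cleaned and invalidated, and finally the write buffer is drained.
 */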
ENTRY_NP(xscalec3_cache_syncI)
ENTRY_NP(xscalec3_cache_purgeID)
        mcr     p15, 0, r0, c7, c5, 0   /* flush I cache (D cleaned below) */
ENTRY_NP(xscalec3_cache_cleanID)
ENTRY_NP(xscalec3_cache_purgeD)
ENTRY(xscalec3_cache_cleanD)

        XSCALE_CACHE_CLEAN_BLOCK
        mov     r0, #0
1:
        mov     r1, r0, asl #30
        mov     r2, #0
2:
        orr     r3, r1, r2, asl #5
        mcr     p15, 0, r3, c7, c14, 2  /* clean and invalidate */
        add     r2, r2, #1
        cmp     r2, #L1_DCACHE_SETS
        bne     2b
        add     r0, r0, #1
        cmp     r0, #4                  /* L1_DCACHE_WAYS */
        bne     1b
        CPWAIT(r0)
        XSCALE_CACHE_CLEAN_UNBLOCK
        mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */

        RET
END(xscalec3_cache_syncI)
END(xscalec3_cache_purgeID)
END(xscalec3_cache_cleanID)
END(xscalec3_cache_purgeD)
END(xscalec3_cache_cleanD)

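
/*
 * Range-based variants.  Each takes a starting virtual address in r0 and a
 * length in r1.  Ranges of 0x4000 bytes (16 KB) or more fall back to one of
 * the whole-cache operations above; smaller ranges are aligned down to the
 * 32-byte cache line and processed one line at a time.
 */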
ENTRY(xscalec3_cache_purgeID_rng)

        cmp     r1, #0x4000
        bcs     _C_LABEL(xscalec3_cache_cleanID)
        and     r2, r0, #0x1f
        add     r1, r1, r2
        bic     r0, r0, #0x1f

1:      mcr     p15, 0, r0, c7, c14, 1  /* clean/invalidate L1 D cache entry */
        nop
        mcr     p15, 0, r0, c7, c5, 1   /* flush I cache single entry */
        add     r0, r0, #32
        subs    r1, r1, #32
        bhi     1b

        CPWAIT(r0)

        mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */

        CPWAIT_AND_RETURN(r0)
END(xscalec3_cache_purgeID_rng)

ENTRY(xscalec3_cache_syncI_rng)
        cmp     r1, #0x4000
        bcs     _C_LABEL(xscalec3_cache_syncI)

        and     r2, r0, #0x1f
        add     r1, r1, r2
        bic     r0, r0, #0x1f

1:      mcr     p15, 0, r0, c7, c10, 1  /* clean D cache entry */
        mcr     p15, 0, r0, c7, c5, 1   /* flush I cache single entry */
        add     r0, r0, #32
        subs    r1, r1, #32
        bhi     1b

        CPWAIT(r0)

        mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */

        CPWAIT_AND_RETURN(r0)
END(xscalec3_cache_syncI_rng)

ENTRY(xscalec3_cache_purgeD_rng)

        cmp     r1, #0x4000
        bcs     _C_LABEL(xscalec3_cache_cleanID)
        and     r2, r0, #0x1f
        add     r1, r1, r2
        bic     r0, r0, #0x1f

1:      mcr     p15, 0, r0, c7, c14, 1  /* Clean and invalidate D cache entry */
        add     r0, r0, #32
        subs    r1, r1, #32
        bhi     1b

        CPWAIT(r0)

        mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */

        CPWAIT_AND_RETURN(r0)
END(xscalec3_cache_purgeD_rng)

ENTRY(xscalec3_cache_cleanID_rng)
ENTRY(xscalec3_cache_cleanD_rng)

        cmp     r1, #0x4000
        bcs     _C_LABEL(xscalec3_cache_cleanID)
        and     r2, r0, #0x1f
        add     r1, r1, r2
        bic     r0, r0, #0x1f

1:      mcr     p15, 0, r0, c7, c10, 1  /* clean L1 D cache entry */
        nop
        add     r0, r0, #32
        subs    r1, r1, #32
        bhi     1b

        CPWAIT(r0)

        mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */

        CPWAIT_AND_RETURN(r0)
END(xscalec3_cache_cleanID_rng)
END(xscalec3_cache_cleanD_rng)

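
/*
 * L2 cache maintenance.  xscalec3_l2cache_purge below walks the whole L2
 * by set/way (8 ways x L2_CACHE_SETS sets of 32-byte lines), cleaning and
 * invalidating each line, bracketed by data memory barriers with a write
 * buffer drain after the loop.
 */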
ENTRY(xscalec3_l2cache_purge)
        /* Clean-up the L2 cache */
        mcr     p15, 0, r0, c7, c10, 5  /* Data memory barrier */
        mov     r0, #0
1:
        mov     r1, r0, asl #29
        mov     r2, #0
2:
        orr     r3, r1, r2, asl #5
        mcr     p15, 1, r3, c7, c15, 2  /* clean and invalidate L2 line by set/way */
        add     r2, r2, #1
        cmp     r2, #L2_CACHE_SETS
        bne     2b
        add     r0, r0, #1
        cmp     r0, #8                  /* L2_CACHE_WAYS */
        bne     1b
        mcr     p15, 0, r0, c7, c10, 4          @ data write barrier

        CPWAIT(r0)
        mcr     p15, 0, r0, c7, c10, 5  /* Data memory barrier */
        RET
END(xscalec3_l2cache_purge)

ENTRY(xscalec3_l2cache_clean_rng)
        mcr     p15, 0, r0, c7, c10, 5  /* Data memory barrier */

        and     r2, r0, #0x1f
        add     r1, r1, r2
        bic     r0, r0, #0x1f

1:      mcr     p15, 1, r0, c7, c11, 1  /* Clean L2 D cache entry */
        add     r0, r0, #32
        subs    r1, r1, #32
        bhi     1b

        CPWAIT(r0)

        mcr     p15, 0, r0, c7, c10, 4          @ data write barrier
        mcr     p15, 0, r0, c7, c10, 5          /* Data memory barrier */

        CPWAIT_AND_RETURN(r0)
END(xscalec3_l2cache_clean_rng)

ENTRY(xscalec3_l2cache_purge_rng)

        mcr     p15, 0, r0, c7, c10, 5  /* Data memory barrier */

        and     r2, r0, #0x1f
        add     r1, r1, r2
        bic     r0, r0, #0x1f

1:      mcr     p15, 1, r0, c7, c11, 1  /* Clean L2 D cache entry */
        mcr     p15, 1, r0, c7, c7, 1   /* Invalidate L2 D cache entry */
        add     r0, r0, #32
        subs    r1, r1, #32
        bhi     1b

        mcr     p15, 0, r0, c7, c10, 4          @ data write barrier
        mcr     p15, 0, r0, c7, c10, 5          /* Data memory barrier */

        CPWAIT_AND_RETURN(r0)
END(xscalec3_l2cache_purge_rng)

ENTRY(xscalec3_l2cache_flush_rng)
        mcr     p15, 0, r0, c7, c10, 5  /* Data memory barrier */

        and     r2, r0, #0x1f
        add     r1, r1, r2
        bic     r0, r0, #0x1f

1:      mcr     p15, 1, r0, c7, c7, 1   /* Invalidate L2 cache line */
        add     r0, r0, #32
        subs    r1, r1, #32
        bhi     1b
        mcr     p15, 0, r0, c7, c10, 4          @ data write barrier
        mcr     p15, 0, r0, c7, c10, 5          /* Data memory barrier */
        CPWAIT_AND_RETURN(r0)
END(xscalec3_l2cache_flush_rng)

/*
 * Functions to set the MMU Translation Table Base register
 *
 * We need to clean and flush the cache as it uses virtual
 * addresses that are about to change.
 */
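
/*
 * Sequence: block interrupts (or set block_userspace_access), clean the
 * whole L1 D-cache, invalidate the I-cache and branch target buffer, drain
 * the write buffer, write the new TTB (with the 0x18 attribute bits so the
 * page table is cached in L2 when ARM_USE_L2_CACHE is defined), invalidate
 * the I+D TLBs, and finally unblock.
 */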
ENTRY(xscalec3_setttb)
#ifdef CACHE_CLEAN_BLOCK_INTR
        mrs     r3, cpsr_all
        orr     r1, r3, #(I32_bit | F32_bit)
        msr     cpsr_all, r1
#else
        ldr     r3, .Lblock_userspace_access
        ldr     r2, [r3]
        orr     r1, r2, #1
        str     r1, [r3]
#endif
        stmfd   sp!, {r0-r3, lr}
        bl      _C_LABEL(xscalec3_cache_cleanID)
        mcr     p15, 0, r0, c7, c5, 0   /* invalidate I$ and BTB */
        mcr     p15, 0, r0, c7, c10, 4  /* drain write and fill buffer */

        CPWAIT(r0)

        ldmfd   sp!, {r0-r3, lr}

#ifdef ARM_USE_L2_CACHE
        orr     r0, r0, #0x18   /* cache the page table in L2 */
#endif
        /* Write the TTB */
        mcr     p15, 0, r0, c2, c0, 0

        /* If we have updated the TTB we must flush the TLB */
        mcr     p15, 0, r0, c8, c7, 0   /* invalidate I+D TLB */

        CPWAIT(r0)

#ifdef CACHE_CLEAN_BLOCK_INTR
        msr     cpsr_all, r3
#else
        str     r2, [r3]
#endif
        RET
END(xscalec3_setttb)

/*
 * Context switch.
 *
 * These are the CPU-specific parts of the context switcher cpu_switch().
 * These functions actually perform the TTB reload.
 *
 * NOTE: Special calling convention
 *      r1, r4-r13 must be preserved
 */
ENTRY(xscalec3_context_switch)
        /*
         * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
         * Thus the data cache will contain only kernel data and the
         * instruction cache will contain only kernel code, and all
         * kernel mappings are shared by all processes.
         */
#ifdef ARM_USE_L2_CACHE
        orr     r0, r0, #0x18   /* Cache the page table in L2 */
#endif
        /* Write the TTB */
        mcr     p15, 0, r0, c2, c0, 0

        /* If we have updated the TTB we must flush the TLB */
        mcr     p15, 0, r0, c8, c7, 0   /* flush the I+D tlb */

        CPWAIT_AND_RETURN(r0)
END(xscalec3_context_switch)

