FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/cpufunc_asm_arm9.S


/*      $NetBSD: cpufunc_asm_arm9.S,v 1.3 2004/01/26 15:54:16 rearnsha Exp $    */

/*
 * Copyright (c) 2001, 2004 ARM Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ARM9 assembly functions for CPU / MMU / TLB specific operations
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD: releng/10.0/sys/arm/arm/cpufunc_asm_arm9.S 248361 2013-03-16 02:48:49Z andrew $");

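/*
 * Everything below goes through coprocessor 15.  The general form is
 *
 *      mcr     p15, 0, Rd, CRn, CRm, op2
 *
 * where CRn selects the functional group (c2 = translation table base,
 * c7 = cache operations, c8 = TLB operations), CRm and op2 select the
 * specific operation, and Rd carries the argument: a virtual address,
 * a set/index value, or a don't-care for whole-cache/TLB operations.
 */
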
/*
 * Function to set the MMU Translation Table Base register.
 *
 * We need to clean and flush the cache as it uses virtual
 * addresses that are about to change.
 */
ENTRY(arm9_setttb)
        stmfd   sp!, {r0, lr}           /* preserve new TTB across the call */
        bl      _C_LABEL(arm9_idcache_wbinv_all)
        ldmfd   sp!, {r0, lr}

        mcr     p15, 0, r0, c2, c0, 0   /* load new TTB */

        mcr     p15, 0, r0, c8, c7, 0   /* invalidate I+D TLBs */
        mov     pc, lr
END(arm9_setttb)
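
/*
 * In rough C, with a hypothetical cp15_write(CRn, CRm, op2, val)
 * helper standing in for the mcr instructions:
 *
 *      void arm9_setttb(u_int ttb)
 *      {
 *              arm9_idcache_wbinv_all();  // flush while old VAs are valid
 *              cp15_write(2, 0, 0, ttb);  // load new TTB
 *              cp15_write(8, 7, 0, 0);    // invalidate I+D TLBs
 *      }
 */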

/*
 * TLB functions.  The _SE ("single entry") variants operate on the
 * single TLB entry matching the virtual address passed in r0.
 */
ENTRY(arm9_tlb_flushID_SE)
        mcr     p15, 0, r0, c8, c6, 1   /* flush D tlb single entry */
        mcr     p15, 0, r0, c8, c5, 1   /* flush I tlb single entry */
        mov     pc, lr
END(arm9_tlb_flushID_SE)

/*
 * Cache operations.  For the entire cache we use the set/index
 * operations.
 */
        s_max   .req r0
        i_max   .req r1
        s_inc   .req r2
        i_inc   .req r3
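
/*
 * Two idioms recur below.  Each *_range function first rounds the
 * (virtual address, length) pair in r0/r1 out to whole cache lines,
 * falling back to the corresponding whole-cache operation once the
 * range reaches 16KB (0x4000), at which point a full set/index sweep
 * is cheaper than walking line by line.  The whole-cache loops visit
 * every set and index; in rough C, using the register aliases above
 * and a hypothetical clean_dcache_set_index() standing in for the
 * set/index mcr:
 *
 *      for (set = s_max; set >= 0; set -= s_inc)
 *              for (idx = i_max; idx >= 0; idx -= i_inc)
 *                      clean_dcache_set_index(set | idx);
 *
 * s_max and i_max are stored pre-shifted into the bit positions the
 * CP15 operation expects, so OR-ing them forms the operand directly.
 * All four values are derived from the D-cache geometry by the CPU
 * setup code; see the .bss declarations at the end of this file.
 */
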
ENTRY_NP(arm9_icache_sync_range)
        ldr     ip, .Larm9_line_size
        cmp     r1, #0x4000             /* 16K or more?  Do the whole cache. */
        bcs     .Larm9_icache_sync_all
        ldr     ip, [ip]                /* ip = cache line size */
        sub     r3, ip, #1              /* r3 = line mask */
        and     r2, r0, r3              /* r2 = offset of va within a line */
        add     r1, r1, r2              /* grow the length to cover it */
        bic     r0, r0, r3              /* align va down to a line boundary */
.Larm9_sync_next:
        mcr     p15, 0, r0, c7, c5, 1   /* Invalidate I cache SE with VA */
        mcr     p15, 0, r0, c7, c10, 1  /* Clean D cache SE with VA */
        add     r0, r0, ip              /* next line */
        subs    r1, r1, ip
        bhi     .Larm9_sync_next
        mov     pc, lr
END(arm9_icache_sync_range)

ENTRY_NP(arm9_icache_sync_all)
.Larm9_icache_sync_all:
        /*
         * We assume that the code here can never be out of sync with the
         * dcache, so that we can safely flush the Icache and fall through
         * into the Dcache cleaning code.
         */
        mcr     p15, 0, r0, c7, c5, 0   /* Flush I cache */
        /* Fall through to clean Dcache. */

.Larm9_dcache_wb:
        ldr     ip, .Larm9_cache_data
        ldmia   ip, {s_max, i_max, s_inc, i_inc}
.Lnext_set:
        orr     ip, s_max, i_max
.Lnext_index:
        mcr     p15, 0, ip, c7, c10, 2  /* Clean D cache SE with Set/Index */
        subs    ip, ip, i_inc
        bhs     .Lnext_index            /* Next index */
        subs    s_max, s_max, s_inc
        bhs     .Lnext_set              /* Next set */
        mov     pc, lr
END(arm9_icache_sync_all)

.Larm9_line_size:
        .word   _C_LABEL(arm_pdcache_line_size)

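/*
 * The three D-cache range operations below are identical except for
 * the per-line CP15 operation they issue:
 *
 *      arm9_dcache_wb_range:           c7, c10, 1      clean only
 *      arm9_dcache_wbinv_range:        c7, c14, 1      clean + invalidate
 *      arm9_dcache_inv_range:          c7, c6, 1       invalidate only
 *
 * "Clean" writes dirty lines back to memory; "invalidate" discards
 * them from the cache.
 */
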
ENTRY(arm9_dcache_wb_range)
        ldr     ip, .Larm9_line_size
        cmp     r1, #0x4000
        bcs     .Larm9_dcache_wb
        ldr     ip, [ip]
        sub     r3, ip, #1
        and     r2, r0, r3
        add     r1, r1, r2
        bic     r0, r0, r3
.Larm9_wb_next:
        mcr     p15, 0, r0, c7, c10, 1  /* Clean D cache SE with VA */
        add     r0, r0, ip
        subs    r1, r1, ip
        bhi     .Larm9_wb_next
        mov     pc, lr
END(arm9_dcache_wb_range)

ENTRY(arm9_dcache_wbinv_range)
        ldr     ip, .Larm9_line_size
        cmp     r1, #0x4000
        bcs     .Larm9_dcache_wbinv_all
        ldr     ip, [ip]
        sub     r3, ip, #1
        and     r2, r0, r3
        add     r1, r1, r2
        bic     r0, r0, r3
.Larm9_wbinv_next:
        mcr     p15, 0, r0, c7, c14, 1  /* Purge D cache SE with VA */
        add     r0, r0, ip
        subs    r1, r1, ip
        bhi     .Larm9_wbinv_next
        mov     pc, lr
END(arm9_dcache_wbinv_range)

/*
 * Note: we must not blindly invalidate here.  Invalidating without
 * first writing back would discard dirty lines outside the requested
 * range, so if the range is too big we fall back to a write-back-and-
 * invalidate of the entire cache rather than an invalidate-all.
 */
ENTRY(arm9_dcache_inv_range)
        ldr     ip, .Larm9_line_size
        cmp     r1, #0x4000
        bcs     .Larm9_dcache_wbinv_all
        ldr     ip, [ip]
        sub     r3, ip, #1
        and     r2, r0, r3
        add     r1, r1, r2
        bic     r0, r0, r3
.Larm9_inv_next:
        mcr     p15, 0, r0, c7, c6, 1   /* Invalidate D cache SE with VA */
        add     r0, r0, ip
        subs    r1, r1, ip
        bhi     .Larm9_inv_next
        mov     pc, lr
END(arm9_dcache_inv_range)
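
/*
 * Combined I+D range operation: every line in the range is
 * invalidated from the I-cache and purged (cleaned and invalidated)
 * from the D-cache, leaving memory as the only valid copy.
 */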
ENTRY(arm9_idcache_wbinv_range)
        ldr     ip, .Larm9_line_size
        cmp     r1, #0x4000
        bcs     .Larm9_idcache_wbinv_all
        ldr     ip, [ip]
        sub     r3, ip, #1
        and     r2, r0, r3
        add     r1, r1, r2
        bic     r0, r0, r3
.Larm9_id_wbinv_next:
        mcr     p15, 0, r0, c7, c5, 1   /* Invalidate I cache SE with VA */
        mcr     p15, 0, r0, c7, c14, 1  /* Purge D cache SE with VA */
        add     r0, r0, ip
        subs    r1, r1, ip
        bhi     .Larm9_id_wbinv_next
        mov     pc, lr
END(arm9_idcache_wbinv_range)

ENTRY_NP(arm9_idcache_wbinv_all)
.Larm9_idcache_wbinv_all:
        /*
         * We assume that the code here can never be out of sync with the
         * dcache, so that we can safely flush the Icache and fall through
         * into the Dcache purging code.
         */
        mcr     p15, 0, r0, c7, c5, 0   /* Flush I cache */
        /* Fall through */

ENTRY(arm9_dcache_wbinv_all)
.Larm9_dcache_wbinv_all:
        ldr     ip, .Larm9_cache_data
        ldmia   ip, {s_max, i_max, s_inc, i_inc}
.Lnext_set_inv:
        orr     ip, s_max, i_max
.Lnext_index_inv:
        mcr     p15, 0, ip, c7, c14, 2  /* Purge D cache SE with Set/Index */
        subs    ip, ip, i_inc
        bhs     .Lnext_index_inv        /* Next index */
        subs    s_max, s_max, s_inc
        bhs     .Lnext_set_inv          /* Next set */
        mov     pc, lr
END(arm9_idcache_wbinv_all)
END(arm9_dcache_wbinv_all)

.Larm9_cache_data:
        .word   _C_LABEL(arm9_dcache_sets_max)

/*
 * Context switch.
 *
 * This is the CPU-specific part of the context switcher, cpu_switch().
 * It actually performs the TTB reload, taking the new translation
 * table base in r0.
 *
 * NOTE: Special calling convention
 *      r1, r4-r13 must be preserved
 */
ENTRY(arm9_context_switch)
        /*
         * We can assume that the caches will only contain kernel addresses
         * at this point.  So no need to flush them again.
         */
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        mcr     p15, 0, r0, c2, c0, 0   /* set the new TTB */
        mcr     p15, 0, r0, c8, c7, 0   /* and flush the I+D tlbs */

        /* Paranoia -- make sure the pipeline is empty. */
        nop
        nop
        nop
        mov     pc, lr
END(arm9_context_switch)

        .bss

/* XXX The following macros should probably be moved to asm.h */
#define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x:
#define C_OBJECT(x)     _DATA_OBJECT(_C_LABEL(x))

/*
 * Parameters for the cache cleaning code.  The ldmia in the cleaning
 * loops above loads all four words from .Larm9_cache_data with a
 * single instruction, so both the order and the contiguity of these
 * variables are assumed; hence the reason for declaring them here in
 * the assembler file rather than in C.
 */
        .align 0
C_OBJECT(arm9_dcache_sets_max)
        .space  4
C_OBJECT(arm9_dcache_index_max)
        .space  4
C_OBJECT(arm9_dcache_sets_inc)
        .space  4
C_OBJECT(arm9_dcache_index_inc)
        .space  4
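
/*
 * From C these are visible as plain globals, e.g.
 *
 *      extern u_int arm9_dcache_sets_max;
 *
 * (the exact C type is an assumption here) and are expected to be
 * filled in by the CPU identification/setup code before any of the
 * set/index loops above run.
 */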
