FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/cpufunc_asm_arm10.S


/*      $NetBSD: cpufunc_asm_arm10.S,v 1.1 2003/09/06 09:12:29 rearnsha Exp $   */

/*-
 * Copyright (c) 2002 ARM Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ARM10 assembly functions for CPU / MMU / TLB specific operations
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD: releng/6.2/sys/arm/arm/cpufunc_asm_arm10.S 139735 2005-01-05 21:58:49Z imp $");

/*
 * Functions to set the MMU Translation Table Base register
 *
 * We need to clean and flush the cache as it uses virtual
 * addresses that are about to change.
 */
ENTRY(arm10_setttb)
        stmfd   sp!, {r0, lr}
        bl      _C_LABEL(arm10_idcache_wbinv_all)
        ldmfd   sp!, {r0, lr}

        mcr     p15, 0, r0, c2, c0, 0   /* load new TTB */

        mcr     p15, 0, r0, c8, c7, 0   /* invalidate I+D TLBs */
        bx      lr

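/*
 * CP15 c2/c0/0 is the Translation Table Base register on ARMv5, and
 * c8/c7/0 invalidates both the instruction and data TLBs.  The full
 * clean-and-invalidate of the caches beforehand is needed because the
 * ARM10 caches are virtually indexed and tagged, so their contents
 * are only meaningful under the mappings the new TTB replaces.
 */
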
/*
 * TLB functions
 */
ENTRY(arm10_tlb_flushID_SE)
        mcr     p15, 0, r0, c8, c6, 1   /* flush D tlb single entry */
        mcr     p15, 0, r0, c8, c5, 1   /* flush I tlb single entry */
        bx      lr

ENTRY(arm10_tlb_flushI_SE)
        mcr     p15, 0, r0, c8, c5, 1   /* flush I tlb single entry */
        bx      lr

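/*
 * Both entry points take the virtual address to invalidate in r0 (the
 * first function argument); the "_SE" suffix denotes the single-entry
 * variants, which evict only the TLB entry that maps the given
 * address rather than flushing the whole TLB.
 */
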
/*
 * Cache operations.  For the entire cache we use the set/index
 * operations.
 */
        s_max   .req r0
        i_max   .req r1
        s_inc   .req r2
        i_inc   .req r3

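/*
 * The set/index loops below start ip at (s_max | i_max), the highest
 * set/index operand, and the inner loop subtracts i_inc until the
 * index field reaches zero (tested with "tst ip, i_max"); index zero
 * itself gets one extra clean/purge before the set number is
 * decremented.  The four parameters are loaded with a single ldmia
 * from arm10_dcache_sets_max and the three words declared after it
 * at the bottom of this file.
 */
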
ENTRY_NP(arm10_icache_sync_range)
        ldr     ip, .Larm10_line_size
        cmp     r1, #0x4000
        bcs     .Larm10_icache_sync_all
        ldr     ip, [ip]
        sub     r3, ip, #1
        and     r2, r0, r3
        add     r1, r1, r2
        bic     r0, r0, r3
.Larm10_sync_next:
        mcr     p15, 0, r0, c7, c5, 1   /* Invalidate I cache SE with VA */
        mcr     p15, 0, r0, c7, c10, 1  /* Clean D cache SE with VA */
        add     r0, r0, ip
        subs    r1, r1, ip
        bpl     .Larm10_sync_next
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        bx      lr
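
/*
 * All of the *_range functions here follow the same pattern: r0 is
 * the start address, r1 the length.  Ranges of 0x4000 (16KB) or more
 * fall back to the whole-cache variant, which is cheaper than walking
 * that many lines (the threshold presumably matches the cache size of
 * the ARM10 cores this was tuned for).  Otherwise the start address
 * is rounded down to a cache-line boundary and the length grown by
 * the amount rounded off, so the loop covers every line that the
 * original [r0, r0 + r1) range touched.
 */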

ENTRY_NP(arm10_icache_sync_all)
.Larm10_icache_sync_all:
        /*
         * We assume that the code here can never be out of sync with the
         * dcache, so that we can safely flush the Icache and fall through
         * into the Dcache cleaning code.
         */
        mcr     p15, 0, r0, c7, c5, 0   /* Flush I cache */
        /* Fall through to clean Dcache. */

.Larm10_dcache_wb:
        ldr     ip, .Larm10_cache_data
        ldmia   ip, {s_max, i_max, s_inc, i_inc}
.Lnext_set:
        orr     ip, s_max, i_max
.Lnext_index:
        mcr     p15, 0, ip, c7, c10, 2  /* Clean D cache SE with Set/Index */
        sub     ip, ip, i_inc
        tst     ip, i_max               /* Index 0 is last one */
        bne     .Lnext_index            /* Next index */
        mcr     p15, 0, ip, c7, c10, 2  /* Clean D cache SE with Set/Index */
        subs    s_max, s_max, s_inc
        bpl     .Lnext_set              /* Next set */
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        bx      lr

.Larm10_line_size:
        .word   _C_LABEL(arm_pdcache_line_size)

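/*
 * .Larm10_line_size holds the address of the C variable
 * arm_pdcache_line_size (the primary D-cache line size in bytes),
 * which the kernel's CPU identification code fills in at boot; the
 * first ldr in each range function fetches the pointer and the
 * second ldr the value.
 */
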
ENTRY(arm10_dcache_wb_range)
        ldr     ip, .Larm10_line_size
        cmp     r1, #0x4000
        bcs     .Larm10_dcache_wb
        ldr     ip, [ip]
        sub     r3, ip, #1
        and     r2, r0, r3
        add     r1, r1, r2
        bic     r0, r0, r3
.Larm10_wb_next:
        mcr     p15, 0, r0, c7, c10, 1  /* Clean D cache SE with VA */
        add     r0, r0, ip
        subs    r1, r1, ip
        bpl     .Larm10_wb_next
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        bx      lr

ENTRY(arm10_dcache_wbinv_range)
        ldr     ip, .Larm10_line_size
        cmp     r1, #0x4000
        bcs     .Larm10_dcache_wbinv_all
        ldr     ip, [ip]
        sub     r3, ip, #1
        and     r2, r0, r3
        add     r1, r1, r2
        bic     r0, r0, r3
.Larm10_wbinv_next:
        mcr     p15, 0, r0, c7, c14, 1  /* Purge D cache SE with VA */
        add     r0, r0, ip
        subs    r1, r1, ip
        bpl     .Larm10_wbinv_next
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        bx      lr

/*
 * Note, we must not invalidate everything.  If the range is too big we
 * must use wb-inv of the entire cache rather than an invalidate-only
 * operation: invalidating the whole cache would also discard dirty
 * lines belonging to unrelated data, so the fallback writes lines
 * back as it invalidates them.
 */
ENTRY(arm10_dcache_inv_range)
        ldr     ip, .Larm10_line_size
        cmp     r1, #0x4000
        bcs     .Larm10_dcache_wbinv_all
        ldr     ip, [ip]
        sub     r3, ip, #1
        and     r2, r0, r3
        add     r1, r1, r2
        bic     r0, r0, r3
.Larm10_inv_next:
        mcr     p15, 0, r0, c7, c6, 1   /* Invalidate D cache SE with VA */
        add     r0, r0, ip
        subs    r1, r1, ip
        bpl     .Larm10_inv_next
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        bx      lr
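
/*
 * Since the start address is rounded down to a line boundary,
 * invalidation covers whole lines only: dirty data sharing the first
 * or last cache line with the target range is discarded along with
 * it.  Callers are therefore expected to pass line-aligned buffers,
 * the usual expectation for invalidate-only (DMA receive) operations.
 */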

ENTRY(arm10_idcache_wbinv_range)
        ldr     ip, .Larm10_line_size
        cmp     r1, #0x4000
        bcs     .Larm10_idcache_wbinv_all
        ldr     ip, [ip]
        sub     r3, ip, #1
        and     r2, r0, r3
        add     r1, r1, r2
        bic     r0, r0, r3
.Larm10_id_wbinv_next:
        mcr     p15, 0, r0, c7, c5, 1   /* Invalidate I cache SE with VA */
        mcr     p15, 0, r0, c7, c14, 1  /* Purge D cache SE with VA */
        add     r0, r0, ip
        subs    r1, r1, ip
        bpl     .Larm10_id_wbinv_next
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        bx      lr

ENTRY_NP(arm10_idcache_wbinv_all)
.Larm10_idcache_wbinv_all:
        /*
         * We assume that the code here can never be out of sync with the
         * dcache, so that we can safely flush the Icache and fall through
         * into the Dcache purging code.
         */
        mcr     p15, 0, r0, c7, c5, 0   /* Flush I cache */
        /* Fall through to purge Dcache. */

ENTRY(arm10_dcache_wbinv_all)
.Larm10_dcache_wbinv_all:
        ldr     ip, .Larm10_cache_data
        ldmia   ip, {s_max, i_max, s_inc, i_inc}
.Lnext_set_inv:
        orr     ip, s_max, i_max
.Lnext_index_inv:
        mcr     p15, 0, ip, c7, c14, 2  /* Purge D cache SE with Set/Index */
        sub     ip, ip, i_inc
        tst     ip, i_max               /* Index 0 is last one */
        bne     .Lnext_index_inv        /* Next index */
        mcr     p15, 0, ip, c7, c14, 2  /* Purge D cache SE with Set/Index */
        subs    s_max, s_max, s_inc
        bpl     .Lnext_set_inv          /* Next set */
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        bx      lr

.Larm10_cache_data:
        .word   _C_LABEL(arm10_dcache_sets_max)

/*
 * Context switch.
 *
 * These are the CPU-specific parts of the context switcher,
 * cpu_switch(); they actually perform the TTB reload.
 *
 * NOTE: Special calling convention
 *      r1, r4-r13 must be preserved
 */
ENTRY(arm10_context_switch)
        /*
         * We can assume that the caches will only contain kernel addresses
         * at this point.  So no need to flush them again.
         */
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        mcr     p15, 0, r0, c2, c0, 0   /* set the new TTB */
        mcr     p15, 0, r0, c8, c7, 0   /* and flush the I+D tlbs */

        /* Paranoia -- make sure the pipeline is empty. */
        nop
        nop
        nop
        bx      lr

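/*
 * The write buffer is drained before the TTB write so that data still
 * queued under the outgoing mappings reaches memory first; the nops
 * afterwards, per the "paranoia" comment, give the pipeline time to
 * drain so that no instruction fetched under the old translation runs
 * after the TLB flush takes effect.
 */
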
        .bss

/* XXX The following macros should probably be moved to asm.h */
#define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x:
#define C_OBJECT(x)     _DATA_OBJECT(_C_LABEL(x))

/*
 * Parameters for the cache cleaning code.  Note that the order of these
 * four variables is assumed in the code above.  Hence the reason for
 * declaring them in the assembler file.
 */
        .align 0
C_OBJECT(arm10_dcache_sets_max)
        .space  4
C_OBJECT(arm10_dcache_index_max)
        .space  4
C_OBJECT(arm10_dcache_sets_inc)
        .space  4
C_OBJECT(arm10_dcache_index_inc)
        .space  4
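
In C terms, the ldmia in the cleaning loops treats these four words as one
contiguous structure, which is why their order matters.  The following is a
rough, self-contained C sketch of the set/index walk, for illustration only;
the struct and function names are hypothetical and not part of this file:

#include <stdint.h>

/* Mirrors the four BSS words above; the ldmia relies on this exact order. */
struct arm10_dcache_params {
        uint32_t sets_max;      /* highest set operand */
        uint32_t index_max;     /* highest index operand */
        uint32_t sets_inc;      /* step between set operands */
        uint32_t index_inc;     /* step between index operands */
};

/* Hypothetical stand-in for the "Clean D cache SE with Set/Index" mcr. */
static void clean_dcache_set_index(uint32_t op) { (void)op; }

/* C rendering of the .Lnext_set / .Lnext_index loop above. */
static void arm10_dcache_wb_sketch(const struct arm10_dcache_params *p)
{
        for (int32_t set = (int32_t)p->sets_max; set >= 0;
            set -= (int32_t)p->sets_inc) {
                uint32_t op = (uint32_t)set | p->index_max;
                while (op & p->index_max) {     /* index 0 is the last one */
                        clean_dcache_set_index(op);
                        op -= p->index_inc;
                }
                clean_dcache_set_index(op);     /* finally, index 0 */
        }
}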
