FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/cpufunc_asm_arm10.S


/*      $NetBSD: cpufunc_asm_arm10.S,v 1.1 2003/09/06 09:12:29 rearnsha Exp $   */

/*-
 * Copyright (c) 2002 ARM Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ARM10 assembly functions for CPU / MMU / TLB specific operations
 *
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD: releng/10.0/sys/arm/arm/cpufunc_asm_arm10.S 248361 2013-03-16 02:48:49Z andrew $");

/*
 * Functions to set the MMU Translation Table Base register
 *
 * We need to clean and flush the cache as it uses virtual
 * addresses that are about to change.
 */
ENTRY(arm10_setttb)
        stmfd   sp!, {r0, lr}           /* preserve new TTB and return addr */
        bl      _C_LABEL(arm10_idcache_wbinv_all)
        ldmfd   sp!, {r0, lr}

        mcr     p15, 0, r0, c2, c0, 0   /* load new TTB */

        mcr     p15, 0, r0, c8, c7, 0   /* invalidate I+D TLBs */
        bx      lr
END(arm10_setttb)
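
/*
 * Illustrative only: C code normally reaches this routine through the
 * cpu_functions dispatch table rather than by a direct call.  A minimal
 * sketch, assuming the cpufuncs/cf_setttb names from the ARM cpufunc
 * interface:
 *
 *      extern struct cpu_functions cpufuncs;
 *
 *      cpufuncs.cf_setttb(new_l1_pa);
 *
 * where new_l1_pa is the physical address of the new L1 translation
 * table, arriving here in r0.
 */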

/*
 * TLB functions
 */
ENTRY(arm10_tlb_flushID_SE)
        mcr     p15, 0, r0, c8, c6, 1   /* flush D tlb single entry */
        mcr     p15, 0, r0, c8, c5, 1   /* flush I tlb single entry */
        bx      lr
END(arm10_tlb_flushID_SE)

ENTRY(arm10_tlb_flushI_SE)
        mcr     p15, 0, r0, c8, c5, 1   /* flush I tlb single entry */
        bx      lr
END(arm10_tlb_flushI_SE)
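
/*
 * Illustrative only: a hypothetical C caller that has just rewritten a
 * page-table entry could drop the stale translation for that one page
 * (the virtual address is passed in r0, as in the functions above):
 *
 *      arm10_tlb_flushID_SE(va);
 */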

/*
 * Cache operations.  For the entire cache we use the set/index
 * operations.
 */
        s_max   .req r0                 /* highest set number */
        i_max   .req r1                 /* highest index number */
        s_inc   .req r2                 /* set field decrement step */
        i_inc   .req r3                 /* index field decrement step */

ENTRY_NP(arm10_icache_sync_range)
        ldr     ip, .Larm10_line_size
        cmp     r1, #0x4000             /* range of 16KB or more? */
        bcs     .Larm10_icache_sync_all /* then sync the whole cache */
        ldr     ip, [ip]                /* ip = D-cache line size */
        sub     r3, ip, #1              /* r3 = line offset mask */
        and     r2, r0, r3              /* r2 = offset of start in line */
        add     r1, r1, r2              /* extend length accordingly... */
        bic     r0, r0, r3              /* ...and align start down */
.Larm10_sync_next:
        mcr     p15, 0, r0, c7, c5, 1   /* Invalidate I cache SE with VA */
        mcr     p15, 0, r0, c7, c10, 1  /* Clean D cache SE with VA */
        add     r0, r0, ip
        subs    r1, r1, ip
        bhi     .Larm10_sync_next
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        bx      lr
END(arm10_icache_sync_range)
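
/*
 * Worked example of the rounding above, assuming a 32-byte line size:
 * for (start, len) = (0x1005, 0x20), the mask r3 is 0x1f, so r2 = 0x05,
 * len grows to 0x25 and start is aligned down to 0x1000.  The loop then
 * visits lines 0x1000 and 0x1020, covering every byte of the original
 * range.
 */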

ENTRY_NP(arm10_icache_sync_all)
.Larm10_icache_sync_all:
        /*
         * We assume that the code here can never be out of sync with the
         * dcache, so that we can safely flush the Icache and fall through
         * into the Dcache cleaning code.
         */
        mcr     p15, 0, r0, c7, c5, 0   /* Flush I cache */
        /* Fall through to clean Dcache. */

.Larm10_dcache_wb:
        ldr     ip, .Larm10_cache_data
        ldmia   ip, {s_max, i_max, s_inc, i_inc}
.Lnext_set:
        orr     ip, s_max, i_max        /* start at the last set/index */
.Lnext_index:
        mcr     p15, 0, ip, c7, c10, 2  /* Clean D cache SE with Set/Index */
        subs    ip, ip, i_inc
        bhs     .Lnext_index            /* Next index */
        subs    s_max, s_max, s_inc
        bhs     .Lnext_set              /* Next set */
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        bx      lr
END(arm10_icache_sync_all)
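
/*
 * The loops above depend on the set and index fields occupying disjoint
 * bit positions in the set/index operand: "orr ip, s_max, i_max" forms
 * the highest set/index pair, and subtracting i_inc (or s_inc) steps one
 * field without disturbing the other.  The four parameters are loaded
 * from the arm10_dcache_* variables declared at the end of this file.
 */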

/* Address of the C variable holding the primary D-cache line size. */
.Larm10_line_size:
        .word   _C_LABEL(arm_pdcache_line_size)

ENTRY(arm10_dcache_wb_range)
        ldr     ip, .Larm10_line_size
        cmp     r1, #0x4000
        bcs     .Larm10_dcache_wb
        ldr     ip, [ip]
        sub     r3, ip, #1
        and     r2, r0, r3
        add     r1, r1, r2
        bic     r0, r0, r3
.Larm10_wb_next:
        mcr     p15, 0, r0, c7, c10, 1  /* Clean D cache SE with VA */
        add     r0, r0, ip
        subs    r1, r1, ip
        bhi     .Larm10_wb_next
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        bx      lr
END(arm10_dcache_wb_range)

ENTRY(arm10_dcache_wbinv_range)
        ldr     ip, .Larm10_line_size
        cmp     r1, #0x4000
        bcs     .Larm10_dcache_wbinv_all
        ldr     ip, [ip]
        sub     r3, ip, #1
        and     r2, r0, r3
        add     r1, r1, r2
        bic     r0, r0, r3
.Larm10_wbinv_next:
        mcr     p15, 0, r0, c7, c14, 1  /* Purge D cache SE with VA */
        add     r0, r0, ip
        subs    r1, r1, ip
        bhi     .Larm10_wbinv_next
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        bx      lr
END(arm10_dcache_wbinv_range)

/*
 * Note that we must not simply invalidate everything: invalidating a
 * line discards any dirty data it holds.  If the range is too big to
 * walk line by line, we therefore write back and invalidate the entire
 * cache instead.
 */
ENTRY(arm10_dcache_inv_range)
        ldr     ip, .Larm10_line_size
        cmp     r1, #0x4000
        bcs     .Larm10_dcache_wbinv_all
        ldr     ip, [ip]
        sub     r3, ip, #1
        and     r2, r0, r3
        add     r1, r1, r2
        bic     r0, r0, r3
.Larm10_inv_next:
        mcr     p15, 0, r0, c7, c6, 1   /* Invalidate D cache SE with VA */
        add     r0, r0, ip
        subs    r1, r1, ip
        bhi     .Larm10_inv_next
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        bx      lr
END(arm10_dcache_inv_range)
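
/*
 * Summary of the three D-cache range primitives above: _wb_range only
 * cleans (writes dirty lines back, leaving them valid), _wbinv_range
 * cleans and then invalidates, and _inv_range invalidates without
 * cleaning, which is why its too-large-range fallback is a write-back-
 * invalidate of the whole cache rather than an invalidate-all.
 */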

ENTRY(arm10_idcache_wbinv_range)
        ldr     ip, .Larm10_line_size
        cmp     r1, #0x4000
        bcs     .Larm10_idcache_wbinv_all
        ldr     ip, [ip]
        sub     r3, ip, #1
        and     r2, r0, r3
        add     r1, r1, r2
        bic     r0, r0, r3
.Larm10_id_wbinv_next:
        mcr     p15, 0, r0, c7, c5, 1   /* Invalidate I cache SE with VA */
        mcr     p15, 0, r0, c7, c14, 1  /* Purge D cache SE with VA */
        add     r0, r0, ip
        subs    r1, r1, ip
        bhi     .Larm10_id_wbinv_next
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        bx      lr
END(arm10_idcache_wbinv_range)

ENTRY_NP(arm10_idcache_wbinv_all)
.Larm10_idcache_wbinv_all:
        /*
         * We assume that the code here can never be out of sync with the
         * dcache, so that we can safely flush the Icache and fall through
         * into the Dcache purging code.
         */
        mcr     p15, 0, r0, c7, c5, 0   /* Flush I cache */
        /* Fall through to purge Dcache. */

ENTRY(arm10_dcache_wbinv_all)
.Larm10_dcache_wbinv_all:
        ldr     ip, .Larm10_cache_data
        ldmia   ip, {s_max, i_max, s_inc, i_inc}
.Lnext_set_inv:
        orr     ip, s_max, i_max        /* start at the last set/index */
.Lnext_index_inv:
        mcr     p15, 0, ip, c7, c14, 2  /* Purge D cache SE with Set/Index */
        subs    ip, ip, i_inc
        bhs     .Lnext_index_inv        /* Next index */
        subs    s_max, s_max, s_inc
        bhs     .Lnext_set_inv          /* Next set */
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        bx      lr
END(arm10_idcache_wbinv_all)
END(arm10_dcache_wbinv_all)

/*
 * Address of arm10_dcache_sets_max; the ldmia above loads it together
 * with the three variables declared after it.
 */
.Larm10_cache_data:
        .word   _C_LABEL(arm10_dcache_sets_max)

/*
 * Context switch.
 *
 * This is the CPU-specific part of the context switcher, cpu_switch().
 * These functions actually perform the TTB reload.
 *
 * NOTE: Special calling convention
 *      r1, r4-r13 must be preserved
 */
ENTRY(arm10_context_switch)
        /*
         * We can assume that the caches will only contain kernel addresses
         * at this point.  So no need to flush them again.
         */
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        mcr     p15, 0, r0, c2, c0, 0   /* set the new TTB */
        mcr     p15, 0, r0, c8, c7, 0   /* and flush the I+D tlbs */

        /* Paranoia -- make sure the pipeline is empty. */
        nop
        nop
        nop
        bx      lr
END(arm10_context_switch)
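
/*
 * Because of the calling convention noted above (r1 and r4-r13 must
 * survive), callers such as cpu_switch() may keep live state in those
 * registers across this call; this routine honours that by using only
 * r0 (and lr for the return).
 */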

        .bss

/* XXX The following macros should probably be moved to asm.h */
#define _DATA_OBJECT(x) .globl x; .type x,_ASM_TYPE_OBJECT; x:
#define C_OBJECT(x)     _DATA_OBJECT(_C_LABEL(x))

/*
 * Parameters for the cache cleaning code.  The ldmia in the code above
 * assumes these four variables are contiguous and in this exact order,
 * which is why they are declared here in the assembler file.
 */
        .align 0
C_OBJECT(arm10_dcache_sets_max)
        .space  4
C_OBJECT(arm10_dcache_index_max)
        .space  4
C_OBJECT(arm10_dcache_sets_inc)
        .space  4
C_OBJECT(arm10_dcache_index_inc)
        .space  4
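
/*
 * A minimal sketch of the matching C-side view of these four words
 * (hypothetical declarations; the real ones live in the ARM cpufunc
 * headers):
 *
 *      extern u_int arm10_dcache_sets_max;
 *      extern u_int arm10_dcache_index_max;
 *      extern u_int arm10_dcache_sets_inc;
 *      extern u_int arm10_dcache_index_inc;
 */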
