FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/cpufunc_asm_sheeva.S

/*-
 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD: releng/8.1/sys/arm/arm/cpufunc_asm_sheeva.S 191141 2009-04-16 11:21:52Z raj $");

#include <machine/param.h>

/*
 * Out-of-line literals, read PC-relative at run time: the address of
 * the C variable arm_pdcache_line_size, and the constant PAGE_MASK.
 */
.Lsheeva_cache_line_size:
        .word   _C_LABEL(arm_pdcache_line_size)
.Lsheeva_asm_page_mask:
        .word   _C_LABEL(PAGE_MASK)

ENTRY(sheeva_setttb)
        /* Disable irqs */
        mrs     r2, cpsr
        orr     r3, r2, #I32_bit | F32_bit
        msr     cpsr_c, r3

        mov     r1, #0
        mcr     p15, 0, r1, c7, c5, 0   /* Invalidate ICache */
1:      mrc     p15, 0, r15, c7, c14, 3 /* Test, clean and invalidate DCache */
        bne     1b                      /* More to do? */

        mcr     p15, 1, r1, c15, c9, 0  /* Clean L2 */
        mcr     p15, 1, r1, c15, c11, 0 /* Invalidate L2 */

        /* Reenable irqs */
        msr     cpsr_c, r2

        mcr     p15, 0, r1, c7, c10, 4  /* drain the write buffer */

        mcr     p15, 0, r0, c2, c0, 0   /* load new TTB */

        mcr     p15, 0, r0, c8, c7, 0   /* invalidate I+D TLBs */
        RET

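/*
 * A rough C-like model of the sequence above (a sketch only; the helper
 * names are illustrative, not the kernel API).  The L1 caches on this
 * ARMv5 core are virtually indexed and tagged, so they must be written
 * back and invalidated before the translation table base changes:
 *
 *      void sheeva_setttb(u_int ttb)
 *      {
 *              disable_interrupts();
 *              invalidate_icache();            // c7, c5, 0
 *              clean_invalidate_dcache();      // loop on c7, c14, 3
 *              clean_l2();                     // c15, c9, 0
 *              invalidate_l2();                // c15, c11, 0
 *              restore_interrupts();
 *              drain_write_buffer();           // c7, c10, 4
 *              load_ttb(ttb);                  // c2, c0, 0
 *              invalidate_tlbs();              // c8, c7, 0
 *      }
 */
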
ENTRY(sheeva_dcache_wbinv_range)
        str     lr, [sp, #-4]!
        mrs     lr, cpsr
        /* Start with cache line aligned address */
        ldr     ip, .Lsheeva_cache_line_size
        ldr     ip, [ip]
        sub     ip, ip, #1              /* ip = cache line mask */
        and     r2, r0, ip              /* r2 = misalignment of start */
        add     r1, r1, r2              /* extend length by misalignment */
        add     r1, r1, ip
        bics    r1, r1, ip              /* round length up to a full line */
        bics    r0, r0, ip              /* align start down to a line */

        ldr     ip, .Lsheeva_asm_page_mask
        and     r2, r0, ip              /* r2 = offset of start in its page */
        rsb     r2, r2, #PAGE_SIZE      /* r2 = bytes to next page boundary */
        cmp     r1, r2
        movcc   ip, r1                  /* ip = min(length, r2) */
        movcs   ip, r2
1:
        add     r3, r0, ip
        sub     r2, r3, #1              /* r2 = last byte of this chunk */
        /* Disable irqs */
        orr     r3, lr, #I32_bit | F32_bit
        msr     cpsr_c, r3
        mcr     p15, 5, r0, c15, c15, 0 /* Clean and inv zone start address */
        mcr     p15, 5, r2, c15, c15, 1 /* Clean and inv zone end address */
        /* Enable irqs */
        msr     cpsr_c, lr

        add     r0, r0, ip              /* advance to the next chunk */
        sub     r1, r1, ip
        cmp     r1, #PAGE_SIZE          /* next chunk is at most one page */
        movcc   ip, r1
        movcs   ip, #PAGE_SIZE
        cmp     r1, #0
        bne     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        ldr     lr, [sp], #4
        RET

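/*
 * Every *_range function in this file follows the pattern above: align
 * the request to cache lines, then walk it at most one page at a time,
 * masking interrupts only around each pair of CP15 "zone" operations so
 * interrupt latency stays bounded.  A rough C sketch of that loop
 * (min(), cp15_zone_start/end and the irq helpers are illustrative
 * names, not the kernel API; only the zone opcodes differ between the
 * functions, and sheeva_idcache_wbinv_range additionally invalidates
 * the icache line by line within each chunk):
 *
 *      void sheeva_range_op(vm_offset_t va, vm_size_t len)
 *      {
 *              u_int lmask = arm_pdcache_line_size - 1;
 *              vm_size_t chunk;
 *
 *              len = (len + (va & lmask) + lmask) & ~lmask;
 *              va &= ~lmask;
 *              chunk = min(len, PAGE_SIZE - (va & PAGE_MASK));
 *              while (len != 0) {
 *                      irqs_off();
 *                      cp15_zone_start(va);
 *                      cp15_zone_end(va + chunk - 1);
 *                      irqs_on();
 *                      va += chunk;
 *                      len -= chunk;
 *                      chunk = min(len, PAGE_SIZE);
 *              }
 *              drain_write_buffer();
 *      }
 */
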
ENTRY(sheeva_idcache_wbinv_range)
        str     lr, [sp, #-4]!
        mrs     lr, cpsr
        /* Start with cache line aligned address */
        ldr     ip, .Lsheeva_cache_line_size
        ldr     ip, [ip]
        sub     ip, ip, #1
        and     r2, r0, ip
        add     r1, r1, r2
        add     r1, r1, ip
        bics    r1, r1, ip
        bics    r0, r0, ip

        ldr     ip, .Lsheeva_asm_page_mask
        and     r2, r0, ip
        rsb     r2, r2, #PAGE_SIZE
        cmp     r1, r2
        movcc   ip, r1
        movcs   ip, r2
1:
        add     r3, r0, ip
        sub     r2, r3, #1
        /* Disable irqs */
        orr     r3, lr, #I32_bit | F32_bit
        msr     cpsr_c, r3
        mcr     p15, 5, r0, c15, c15, 0 /* Clean and inv zone start address */
        mcr     p15, 5, r2, c15, c15, 1 /* Clean and inv zone end address */
        /* Enable irqs */
        msr     cpsr_c, lr

        /* Invalidate the icache line by line */
        ldr     r3, .Lsheeva_cache_line_size
        ldr     r3, [r3]
2:
        mcr     p15, 0, r0, c7, c5, 1   /* Invalidate icache entry (MVA) */
        add     r0, r0, r3
        cmp     r2, r0
        bhi     2b

        add     r0, r2, #1              /* r0 = first byte after this chunk */
        sub     r1, r1, ip
        cmp     r1, #PAGE_SIZE
        movcc   ip, r1
        movcs   ip, #PAGE_SIZE
        cmp     r1, #0
        bne     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        ldr     lr, [sp], #4
        RET

ENTRY(sheeva_dcache_inv_range)
        str     lr, [sp, #-4]!
        mrs     lr, cpsr
        /* Start with cache line aligned address */
        ldr     ip, .Lsheeva_cache_line_size
        ldr     ip, [ip]
        sub     ip, ip, #1
        and     r2, r0, ip
        add     r1, r1, r2
        add     r1, r1, ip
        bics    r1, r1, ip
        bics    r0, r0, ip

        ldr     ip, .Lsheeva_asm_page_mask
        and     r2, r0, ip
        rsb     r2, r2, #PAGE_SIZE
        cmp     r1, r2
        movcc   ip, r1
        movcs   ip, r2
1:
        add     r3, r0, ip
        sub     r2, r3, #1
        /* Disable irqs */
        orr     r3, lr, #I32_bit | F32_bit
        msr     cpsr_c, r3
        mcr     p15, 5, r0, c15, c14, 0 /* Inv zone start address */
        mcr     p15, 5, r2, c15, c14, 1 /* Inv zone end address */
        /* Enable irqs */
        msr     cpsr_c, lr

        add     r0, r0, ip
        sub     r1, r1, ip
        cmp     r1, #PAGE_SIZE
        movcc   ip, r1
        movcs   ip, #PAGE_SIZE
        cmp     r1, #0
        bne     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        ldr     lr, [sp], #4
        RET

ENTRY(sheeva_dcache_wb_range)
        str     lr, [sp, #-4]!
        mrs     lr, cpsr
        /* Start with cache line aligned address */
        ldr     ip, .Lsheeva_cache_line_size
        ldr     ip, [ip]
        sub     ip, ip, #1
        and     r2, r0, ip
        add     r1, r1, r2
        add     r1, r1, ip
        bics    r1, r1, ip
        bics    r0, r0, ip

        ldr     ip, .Lsheeva_asm_page_mask
        and     r2, r0, ip
        rsb     r2, r2, #PAGE_SIZE
        cmp     r1, r2
        movcc   ip, r1
        movcs   ip, r2
1:
        add     r3, r0, ip
        sub     r2, r3, #1
        /* Disable irqs */
        orr     r3, lr, #I32_bit | F32_bit
        msr     cpsr_c, r3
        mcr     p15, 5, r0, c15, c13, 0 /* Clean zone start address */
        mcr     p15, 5, r2, c15, c13, 1 /* Clean zone end address */
        /* Enable irqs */
        msr     cpsr_c, lr

        add     r0, r0, ip
        sub     r1, r1, ip
        cmp     r1, #PAGE_SIZE
        movcc   ip, r1
        movcs   ip, #PAGE_SIZE
        cmp     r1, #0
        bne     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        ldr     lr, [sp], #4
        RET

ENTRY(sheeva_l2cache_wbinv_range)
        str     lr, [sp, #-4]!
        mrs     lr, cpsr
        /* Start with cache line aligned address */
        ldr     ip, .Lsheeva_cache_line_size
        ldr     ip, [ip]
        sub     ip, ip, #1
        and     r2, r0, ip
        add     r1, r1, r2
        add     r1, r1, ip
        bics    r1, r1, ip
        bics    r0, r0, ip

        ldr     ip, .Lsheeva_asm_page_mask
        and     r2, r0, ip
        rsb     r2, r2, #PAGE_SIZE
        cmp     r1, r2
        movcc   ip, r1
        movcs   ip, r2
1:
        add     r3, r0, ip
        sub     r2, r3, #1
        /* Disable irqs */
        orr     r3, lr, #I32_bit | F32_bit
        msr     cpsr_c, r3
        mcr     p15, 1, r0, c15, c9, 4  /* Clean L2 zone start address */
        mcr     p15, 1, r2, c15, c9, 5  /* Clean L2 zone end address */
        mcr     p15, 1, r0, c15, c11, 4 /* Inv L2 zone start address */
        mcr     p15, 1, r2, c15, c11, 5 /* Inv L2 zone end address */
        /* Enable irqs */
        msr     cpsr_c, lr

        add     r0, r0, ip
        sub     r1, r1, ip
        cmp     r1, #PAGE_SIZE
        movcc   ip, r1
        movcs   ip, #PAGE_SIZE
        cmp     r1, #0
        bne     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        ldr     lr, [sp], #4
        RET

ENTRY(sheeva_l2cache_inv_range)
        str     lr, [sp, #-4]!
        mrs     lr, cpsr
        /* Start with cache line aligned address */
        ldr     ip, .Lsheeva_cache_line_size
        ldr     ip, [ip]
        sub     ip, ip, #1
        and     r2, r0, ip
        add     r1, r1, r2
        add     r1, r1, ip
        bics    r1, r1, ip
        bics    r0, r0, ip

        ldr     ip, .Lsheeva_asm_page_mask
        and     r2, r0, ip
        rsb     r2, r2, #PAGE_SIZE
        cmp     r1, r2
        movcc   ip, r1
        movcs   ip, r2
1:
        add     r3, r0, ip
        sub     r2, r3, #1
        /* Disable irqs */
        orr     r3, lr, #I32_bit | F32_bit
        msr     cpsr_c, r3
        mcr     p15, 1, r0, c15, c11, 4 /* Inv L2 zone start address */
        mcr     p15, 1, r2, c15, c11, 5 /* Inv L2 zone end address */
        /* Enable irqs */
        msr     cpsr_c, lr

        add     r0, r0, ip
        sub     r1, r1, ip
        cmp     r1, #PAGE_SIZE
        movcc   ip, r1
        movcs   ip, #PAGE_SIZE
        cmp     r1, #0
        bne     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        ldr     lr, [sp], #4
        RET

ENTRY(sheeva_l2cache_wb_range)
        str     lr, [sp, #-4]!
        mrs     lr, cpsr
        /* Start with cache line aligned address */
        ldr     ip, .Lsheeva_cache_line_size
        ldr     ip, [ip]
        sub     ip, ip, #1
        and     r2, r0, ip
        add     r1, r1, r2
        add     r1, r1, ip
        bics    r1, r1, ip
        bics    r0, r0, ip

        ldr     ip, .Lsheeva_asm_page_mask
        and     r2, r0, ip
        rsb     r2, r2, #PAGE_SIZE
        cmp     r1, r2
        movcc   ip, r1
        movcs   ip, r2
1:
        add     r3, r0, ip
        sub     r2, r3, #1
        /* Disable irqs */
        orr     r3, lr, #I32_bit | F32_bit
        msr     cpsr_c, r3
        mcr     p15, 1, r0, c15, c9, 4  /* Clean L2 zone start address */
        mcr     p15, 1, r2, c15, c9, 5  /* Clean L2 zone end address */
        /* Enable irqs */
        msr     cpsr_c, lr

        add     r0, r0, ip
        sub     r1, r1, ip
        cmp     r1, #PAGE_SIZE
        movcc   ip, r1
        movcs   ip, #PAGE_SIZE
        cmp     r1, #0
        bne     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        ldr     lr, [sp], #4
        RET

ENTRY(sheeva_l2cache_wbinv_all)
        mov     r0, #0
        mcr     p15, 1, r0, c15, c9, 0  /* Clean L2 */
        mcr     p15, 1, r0, c15, c11, 0 /* Invalidate L2 */
        mcr     p15, 0, r0, c7, c10, 4  /* drain the write buffer */
        RET

/*
 * This function modifies the value of the CP15 control extension
 * register (accessed via "p15, 1, ..., c15, c1, 0" below) bit by bit,
 * as follows:
 *
 * arg1 bit   arg2 bit     EFFECT (resulting bit value in the register)
 *     0          0        not changed
 *     0          1        negated
 *     1          0        cleared
 *     1          1        set
 */
ENTRY(sheeva_control_ext)
        mrc     p15, 1, r3, c15, c1, 0  /* Read the control register */
        bic     r2, r3, r0              /* Clear bits */
        eor     r2, r2, r1              /* XOR bits */

        teq     r2, r3                  /* Only write if there is a change */
        mcrne   p15, 1, r2, c15, c1, 0  /* Write new control register */
        mov     r0, r3                  /* Return old value */
        RET
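
/*
 * A rough C model of the update above (helper names are illustrative,
 * not the kernel API):
 *
 *      u_int sheeva_control_ext(u_int clear, u_int xbits)
 *      {
 *              u_int old, val;
 *
 *              old = read_ext_control();       // p15, 1, c15, c1, 0
 *              val = (old & ~clear) ^ xbits;
 *              if (val != old)
 *                      write_ext_control(val); // only on change
 *              return (old);
 *      }
 *
 * Hence clear=0/xbits=0 leaves a bit alone, clear=0/xbits=1 toggles it,
 * clear=1/xbits=0 clears it, and clear=1/xbits=1 sets it, matching the
 * table above.
 */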
