FreeBSD/Linux Kernel Cross Reference
sys/arm/include/cpufunc.h


/*      $NetBSD: cpufunc.h,v 1.29 2003/09/06 09:08:35 rearnsha Exp $    */

/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.h
 *
 * Prototypes for cpu, mmu and tlb related functions.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_CPUFUNC_H_
#define _MACHINE_CPUFUNC_H_

#ifdef _KERNEL

#include <sys/types.h>
#include <machine/armreg.h>

static __inline void
breakpoint(void)
{
        __asm(".word      0xe7ffffff"); /* udf 0xffff */
}

struct cpu_functions {

        /* CPU functions */
#if __ARM_ARCH < 6
        void    (*cf_cpwait)            (void);

        /* MMU functions */

        u_int   (*cf_control)           (u_int bic, u_int eor);
        void    (*cf_setttb)            (u_int ttb);

        /* TLB functions */

        void    (*cf_tlb_flushID)       (void);
        void    (*cf_tlb_flushID_SE)    (u_int va);
        void    (*cf_tlb_flushD)        (void);
        void    (*cf_tlb_flushD_SE)     (u_int va);

        /*
         * Cache operations:
         *
         * We define the following primitives:
         *
         *      icache_sync_range       Synchronize I-cache range
         *
         *      dcache_wbinv_all        Write-back and Invalidate D-cache
         *      dcache_wbinv_range      Write-back and Invalidate D-cache range
         *      dcache_inv_range        Invalidate D-cache range
         *      dcache_wb_range         Write-back D-cache range
         *
         *      idcache_wbinv_all       Write-back and Invalidate D-cache,
         *                              Invalidate I-cache
         *      idcache_wbinv_range     Write-back and Invalidate D-cache,
         *                              Invalidate I-cache range
         *
         * Note that the ARM term for "write-back" is "clean".  We use
         * the term "write-back" since it's a more common way to describe
         * the operation.
         *
         * There are some rules that must be followed:
         *
         *      ID-cache Invalidate All:
         *              Unlike other functions, this one must never write back.
         *              It is used to initialize the MMU when it is in an unknown
         *              state (such as when it may have lines tagged as valid
         *              that belong to a previous set of mappings).
         *
         *      I-cache Sync range:
         *              The goal is to synchronize the instruction stream,
         *              so you may need to write-back dirty D-cache blocks
         *              first.  If a range is requested, and you can't
         *              synchronize just a range, you have to hit the whole
         *              thing.
         *
         *      D-cache Write-Back and Invalidate range:
         *              If you can't WB-Inv a range, you must WB-Inv the
         *              entire D-cache.
         *
         *      D-cache Invalidate:
         *              If you can't Inv the D-cache, you must Write-Back
         *              and Invalidate.  Code that uses this operation
         *              MUST NOT assume that the D-cache will not be written
         *              back to memory.
         *
         *      D-cache Write-Back:
         *              If you can't Write-back without doing an Inv,
         *              that's fine.  Then treat this as a WB-Inv.
         *              Skipping the invalidate is merely an optimization.
         *
         *      All operations:
         *              Valid virtual addresses must be passed to each
         *              cache operation.
         *
         * (An illustrative usage sketch follows the cpu_*() wrapper
         * macros below.)
         */
        void    (*cf_icache_sync_range) (vm_offset_t, vm_size_t);

        void    (*cf_dcache_wbinv_all)  (void);
        void    (*cf_dcache_wbinv_range) (vm_offset_t, vm_size_t);
        void    (*cf_dcache_inv_range)  (vm_offset_t, vm_size_t);
        void    (*cf_dcache_wb_range)   (vm_offset_t, vm_size_t);

        void    (*cf_idcache_inv_all)   (void);
        void    (*cf_idcache_wbinv_all) (void);
        void    (*cf_idcache_wbinv_range) (vm_offset_t, vm_size_t);
#endif
        void    (*cf_l2cache_wbinv_all) (void);
        void    (*cf_l2cache_wbinv_range) (vm_offset_t, vm_size_t);
        void    (*cf_l2cache_inv_range)   (vm_offset_t, vm_size_t);
        void    (*cf_l2cache_wb_range)    (vm_offset_t, vm_size_t);
        void    (*cf_l2cache_drain_writebuf)      (void);

        /* Other functions */

#if __ARM_ARCH < 6
        void    (*cf_drain_writebuf)    (void);
#endif

        void    (*cf_sleep)             (int mode);

#if __ARM_ARCH < 6
        /* Soft functions */

        void    (*cf_context_switch)    (void);
#endif

        void    (*cf_setup)             (void);
};

extern struct cpu_functions cpufuncs;
extern u_int cputype;

#if __ARM_ARCH < 6
#define cpu_cpwait()            cpufuncs.cf_cpwait()

#define cpu_control(c, e)       cpufuncs.cf_control(c, e)
#define cpu_setttb(t)           cpufuncs.cf_setttb(t)

#define cpu_tlb_flushID()       cpufuncs.cf_tlb_flushID()
#define cpu_tlb_flushID_SE(e)   cpufuncs.cf_tlb_flushID_SE(e)
#define cpu_tlb_flushD()        cpufuncs.cf_tlb_flushD()
#define cpu_tlb_flushD_SE(e)    cpufuncs.cf_tlb_flushD_SE(e)

#define cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))

#define cpu_dcache_wbinv_all()  cpufuncs.cf_dcache_wbinv_all()
#define cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
#define cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
#define cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))

#define cpu_idcache_inv_all()   cpufuncs.cf_idcache_inv_all()
#define cpu_idcache_wbinv_all() cpufuncs.cf_idcache_wbinv_all()
#define cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))
#endif

#define cpu_l2cache_wbinv_all() cpufuncs.cf_l2cache_wbinv_all()
#define cpu_l2cache_wb_range(a, s) cpufuncs.cf_l2cache_wb_range((a), (s))
#define cpu_l2cache_inv_range(a, s) cpufuncs.cf_l2cache_inv_range((a), (s))
#define cpu_l2cache_wbinv_range(a, s) cpufuncs.cf_l2cache_wbinv_range((a), (s))
#define cpu_l2cache_drain_writebuf() cpufuncs.cf_l2cache_drain_writebuf()

#if __ARM_ARCH < 6
#define cpu_drain_writebuf()    cpufuncs.cf_drain_writebuf()
#endif
#define cpu_sleep(m)            cpufuncs.cf_sleep(m)

#define cpu_setup()                     cpufuncs.cf_setup()

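/*
 * Editor's usage sketch (not part of the original header): on pre-ARMv6
 * configurations, where the cpu_dcache_*() wrappers above exist, a driver
 * doing DMA would typically pair a write-back before the device reads a
 * buffer with an invalidate before the CPU reads what the device wrote,
 * following the rules documented in struct cpu_functions.  The function
 * and parameter names below are hypothetical.
 */
#if 0   /* illustrative sketch only */
static __inline void
example_dma_sync(vm_offset_t buf, vm_size_t len, int cpu_to_device)
{
        if (cpu_to_device) {
                /* Push dirty lines out to memory before the device reads it. */
                cpu_dcache_wb_range(buf, len);
                cpu_l2cache_wb_range(buf, len);
        } else {
                /* Discard stale lines before the CPU reads what the device wrote. */
                cpu_l2cache_inv_range(buf, len);
                cpu_dcache_inv_range(buf, len);
        }
}
#endif
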
int     set_cpufuncs            (void);
#define ARCHITECTURE_NOT_PRESENT        1       /* known but not configured */
#define ARCHITECTURE_NOT_SUPPORTED      2       /* not known */

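/*
 * Editor's sketch (not part of the original header): early machine-dependent
 * start-up code is expected to install the function table with set_cpufuncs()
 * before any cpu_*() wrapper is used, and then run the per-CPU setup hook.
 * The surrounding function name is hypothetical, and a non-zero return is
 * assumed here to be fatal.
 */
#if 0   /* illustrative sketch only */
static __inline void
example_cpu_init(void)
{
        if (set_cpufuncs() != 0)
                panic("cpufunc: CPU type not recognized or not configured");
        cpu_setup();            /* CPU-specific control register and cache setup */
}
#endif
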
void    cpufunc_nullop          (void);
u_int   cpufunc_control         (u_int clear, u_int bic);
void    cpu_domains             (u_int domains);

#if defined(CPU_ARM9E)
void    arm9_tlb_flushID_SE     (u_int va);
void    arm9_context_switch     (void);

u_int   sheeva_control_ext              (u_int, u_int);
void    sheeva_cpu_sleep                (int);
void    sheeva_setttb                   (u_int);
void    sheeva_dcache_wbinv_range       (vm_offset_t, vm_size_t);
void    sheeva_dcache_inv_range         (vm_offset_t, vm_size_t);
void    sheeva_dcache_wb_range          (vm_offset_t, vm_size_t);
void    sheeva_idcache_wbinv_range      (vm_offset_t, vm_size_t);

void    sheeva_l2cache_wbinv_range      (vm_offset_t, vm_size_t);
void    sheeva_l2cache_inv_range        (vm_offset_t, vm_size_t);
void    sheeva_l2cache_wb_range         (vm_offset_t, vm_size_t);
void    sheeva_l2cache_wbinv_all        (void);
#endif

#if defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT)
void    armv7_cpu_sleep                 (int);
#endif
#if defined(CPU_MV_PJ4B)
void    pj4b_config                     (void);
#endif

#if defined(CPU_ARM1176)
void    arm11x6_sleep                   (int);  /* no ref. for errata */
#endif

#if defined(CPU_ARM9E)
void    armv5_ec_setttb(u_int);

void    armv5_ec_icache_sync_range(vm_offset_t, vm_size_t);

void    armv5_ec_dcache_wbinv_all(void);
void    armv5_ec_dcache_wbinv_range(vm_offset_t, vm_size_t);
void    armv5_ec_dcache_inv_range(vm_offset_t, vm_size_t);
void    armv5_ec_dcache_wb_range(vm_offset_t, vm_size_t);

void    armv5_ec_idcache_wbinv_all(void);
void    armv5_ec_idcache_wbinv_range(vm_offset_t, vm_size_t);

void    armv4_tlb_flushID       (void);
void    armv4_tlb_flushD        (void);
void    armv4_tlb_flushD_SE     (u_int va);

void    armv4_drain_writebuf    (void);
void    armv4_idcache_inv_all   (void);
#endif

/*
 * Macros for manipulating CPU interrupts
 */
#if __ARM_ARCH < 6
#define __ARM_INTR_BITS         (PSR_I | PSR_F)
#else
#define __ARM_INTR_BITS         (PSR_I | PSR_F | PSR_A)
#endif

static __inline uint32_t
__set_cpsr(uint32_t bic, uint32_t eor)
{
        uint32_t        tmp, ret;

        __asm __volatile(
                "mrs     %0, cpsr\n"            /* Get the CPSR */
                "bic     %1, %0, %2\n"          /* Clear bits */
                "eor     %1, %1, %3\n"          /* XOR bits */
                "msr     cpsr_xc, %1\n"         /* Set the CPSR */
        : "=&r" (ret), "=&r" (tmp)
        : "r" (bic), "r" (eor) : "memory");

        return ret;
}

static __inline uint32_t
disable_interrupts(uint32_t mask)
{

        return (__set_cpsr(mask & __ARM_INTR_BITS, mask & __ARM_INTR_BITS));
}

static __inline uint32_t
enable_interrupts(uint32_t mask)
{

        return (__set_cpsr(mask & __ARM_INTR_BITS, 0));
}

static __inline uint32_t
restore_interrupts(uint32_t old_cpsr)
{

        return (__set_cpsr(__ARM_INTR_BITS, old_cpsr & __ARM_INTR_BITS));
}

static __inline register_t
intr_disable(void)
{

        return (disable_interrupts(PSR_I | PSR_F));
}

static __inline void
intr_restore(register_t s)
{

        restore_interrupts(s);
}
#undef __ARM_INTR_BITS

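/*
 * Editor's sketch (not part of the original header): the usual
 * save/disable/restore pattern built on the inlines above.  intr_disable()
 * masks IRQ and FIQ and returns the previous CPSR state so that
 * intr_restore() re-enables only what was enabled on entry, which keeps
 * nested critical sections safe.  The function name and the protected
 * counter are hypothetical.
 */
#if 0   /* illustrative sketch only */
static __inline void
example_critical_increment(volatile u_int *counter)
{
        register_t s;

        s = intr_disable();     /* save the old state, mask IRQ/FIQ */
        (*counter)++;           /* work an interrupt handler might also do */
        intr_restore(s);        /* restore exactly the saved mask state */
}
#endif
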
/*
 * Functions to manipulate cpu r13
 * (in arm/arm32/setstack.S)
 */

void set_stackptr       (u_int mode, u_int address);
u_int get_stackptr      (u_int mode);

/*
 * CPU functions from locore.S
 */

void cpu_reset          (void) __attribute__((__noreturn__));

/*
 * Cache info variables.
 */

/* PRIMARY CACHE VARIABLES */
extern int      arm_picache_size;
extern int      arm_picache_line_size;
extern int      arm_picache_ways;

extern int      arm_pdcache_size;       /* and unified */
extern int      arm_pdcache_line_size;
extern int      arm_pdcache_ways;

extern int      arm_pcache_type;
extern int      arm_pcache_unified;

extern int      arm_dcache_align;
extern int      arm_dcache_align_mask;

extern u_int    arm_cache_level;
extern u_int    arm_cache_loc;
extern u_int    arm_cache_type[14];

#else   /* !_KERNEL */

static __inline void
breakpoint(void)
{

        /*
         * This matches the instruction used by GDB for software
         * breakpoints.
         */
        __asm(".word      0xe7ffdefe"); /* udf 0xfdee */
}

#endif  /* _KERNEL */
#endif  /* _MACHINE_CPUFUNC_H_ */

/* End of cpufunc.h */

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.