FreeBSD/Linux Kernel Cross Reference
sys/arm/include/cpufunc.h


    1 /*      $NetBSD: cpufunc.h,v 1.29 2003/09/06 09:08:35 rearnsha Exp $    */
    2 
    3 /*-
    4  * Copyright (c) 1997 Mark Brinicombe.
    5  * Copyright (c) 1997 Causality Limited
    6  * All rights reserved.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. All advertising materials mentioning features or use of this software
   17  *    must display the following acknowledgement:
   18  *      This product includes software developed by Causality Limited.
   19  * 4. The name of Causality Limited may not be used to endorse or promote
   20  *    products derived from this software without specific prior written
   21  *    permission.
   22  *
   23  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
   24  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   25  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   26  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
   27  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   28  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   29  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   33  * SUCH DAMAGE.
   34  *
   35  * RiscBSD kernel project
   36  *
   37  * cpufunc.h
   38  *
   39  * Prototypes for cpu, mmu and tlb related functions.
   40  *
   41  * $FreeBSD: releng/10.4/sys/arm/include/cpufunc.h 278635 2015-02-12 21:10:24Z ian $
   42  */
   43 
   44 #ifndef _MACHINE_CPUFUNC_H_
   45 #define _MACHINE_CPUFUNC_H_
   46 
   47 #ifdef _KERNEL
   48 
   49 #include <sys/types.h>
   50 #include <machine/cpuconf.h>
   51 #include <machine/katelib.h> /* For in[bwl] and out[bwl] */
   52 
   53 static __inline void
   54 breakpoint(void)
   55 {
    56         __asm(".word      0xe7ffffff");  /* architecturally undefined instruction, used as a breakpoint trap */
   57 }
   58 
   59 struct cpu_functions {
   60 
   61         /* CPU functions */
   62         
   63         u_int   (*cf_id)                (void);
   64         void    (*cf_cpwait)            (void);
   65 
   66         /* MMU functions */
   67 
   68         u_int   (*cf_control)           (u_int bic, u_int eor);
   69         void    (*cf_domains)           (u_int domains);
   70         void    (*cf_setttb)            (u_int ttb);
   71         u_int   (*cf_faultstatus)       (void);
   72         u_int   (*cf_faultaddress)      (void);
   73 
   74         /* TLB functions */
   75 
   76         void    (*cf_tlb_flushID)       (void); 
   77         void    (*cf_tlb_flushID_SE)    (u_int va);     
   78         void    (*cf_tlb_flushI)        (void);
   79         void    (*cf_tlb_flushI_SE)     (u_int va);     
   80         void    (*cf_tlb_flushD)        (void);
   81         void    (*cf_tlb_flushD_SE)     (u_int va);     
   82 
   83         /*
   84          * Cache operations:
   85          *
   86          * We define the following primitives:
   87          *
   88          *      icache_sync_all         Synchronize I-cache
   89          *      icache_sync_range       Synchronize I-cache range
   90          *
   91          *      dcache_wbinv_all        Write-back and Invalidate D-cache
   92          *      dcache_wbinv_range      Write-back and Invalidate D-cache range
   93          *      dcache_inv_range        Invalidate D-cache range
   94          *      dcache_wb_range         Write-back D-cache range
   95          *
   96          *      idcache_wbinv_all       Write-back and Invalidate D-cache,
   97          *                              Invalidate I-cache
   98          *      idcache_wbinv_range     Write-back and Invalidate D-cache,
   99          *                              Invalidate I-cache range
  100          *
  101          * Note that the ARM term for "write-back" is "clean".  We use
  102          * the term "write-back" since it's a more common way to describe
  103          * the operation.
  104          *
  105          * There are some rules that must be followed:
  106          *
  107          *      ID-cache Invalidate All:
  108          *              Unlike other functions, this one must never write back.
  109          * It is used to initialize the MMU when it is in an unknown
  110          *              state (such as when it may have lines tagged as valid
  111          *              that belong to a previous set of mappings).
  112          *                                          
  113          *      I-cache Synch (all or range):
  114          *              The goal is to synchronize the instruction stream,
  115          *              so you may need to write back dirty D-cache blocks
  116          *              first.  If a range is requested, and you can't
  117          *              synchronize just a range, you have to hit the whole
  118          *              thing.
  119          *
  120          *      D-cache Write-Back and Invalidate range:
  121          *              If you can't WB-Inv a range, you must WB-Inv the
  122          *              entire D-cache.
  123          *
  124          *      D-cache Invalidate:
  125          *              If you can't Inv the D-cache, you must Write-Back
  126          *              and Invalidate.  Code that uses this operation
  127          *              MUST NOT assume that the D-cache will not be written
  128          *              back to memory.
  129          *
  130          *      D-cache Write-Back:
  131          *              If you can't Write-back without doing an Inv,
  132          *              that's fine.  Then treat this as a WB-Inv.
  133          *              Skipping the invalidate is merely an optimization.
  134          *
  135          *      All operations:
  136          *              Valid virtual addresses must be passed to each
  137          *              cache operation.
  138          */
  139         void    (*cf_icache_sync_all)   (void);
  140         void    (*cf_icache_sync_range) (vm_offset_t, vm_size_t);
  141 
  142         void    (*cf_dcache_wbinv_all)  (void);
  143         void    (*cf_dcache_wbinv_range) (vm_offset_t, vm_size_t);
  144         void    (*cf_dcache_inv_range)  (vm_offset_t, vm_size_t);
  145         void    (*cf_dcache_wb_range)   (vm_offset_t, vm_size_t);
  146 
  147         void    (*cf_idcache_inv_all)   (void);
  148         void    (*cf_idcache_wbinv_all) (void);
  149         void    (*cf_idcache_wbinv_range) (vm_offset_t, vm_size_t);
  150         void    (*cf_l2cache_wbinv_all) (void);
  151         void    (*cf_l2cache_wbinv_range) (vm_offset_t, vm_size_t);
  152         void    (*cf_l2cache_inv_range)   (vm_offset_t, vm_size_t);
  153         void    (*cf_l2cache_wb_range)    (vm_offset_t, vm_size_t);
  154         void    (*cf_l2cache_drain_writebuf)      (void);
  155 
  156         /* Other functions */
  157 
  158         void    (*cf_flush_prefetchbuf) (void);
  159         void    (*cf_drain_writebuf)    (void);
  160         void    (*cf_flush_brnchtgt_C)  (void);
  161         void    (*cf_flush_brnchtgt_E)  (u_int va);
  162 
  163         void    (*cf_sleep)             (int mode);
  164 
  165         /* Soft functions */
  166 
  167         int     (*cf_dataabt_fixup)     (void *arg);
  168         int     (*cf_prefetchabt_fixup) (void *arg);
  169 
  170         void    (*cf_context_switch)    (void);
  171 
  172         void    (*cf_setup)             (char *string);
  173 };
  174 
  175 extern struct cpu_functions cpufuncs;
  176 extern u_int cputype;
  177 
  178 #define cpu_id()                cpufuncs.cf_id()
  179 #define cpu_cpwait()            cpufuncs.cf_cpwait()
  180 
  181 #define cpu_control(c, e)       cpufuncs.cf_control(c, e)
  182 #define cpu_domains(d)          cpufuncs.cf_domains(d)
  183 #define cpu_setttb(t)           cpufuncs.cf_setttb(t)
  184 #define cpu_faultstatus()       cpufuncs.cf_faultstatus()
  185 #define cpu_faultaddress()      cpufuncs.cf_faultaddress()
  186 
  187 #ifndef SMP
  188 
  189 #define cpu_tlb_flushID()       cpufuncs.cf_tlb_flushID()
  190 #define cpu_tlb_flushID_SE(e)   cpufuncs.cf_tlb_flushID_SE(e)
  191 #define cpu_tlb_flushI()        cpufuncs.cf_tlb_flushI()
  192 #define cpu_tlb_flushI_SE(e)    cpufuncs.cf_tlb_flushI_SE(e)
  193 #define cpu_tlb_flushD()        cpufuncs.cf_tlb_flushD()
  194 #define cpu_tlb_flushD_SE(e)    cpufuncs.cf_tlb_flushD_SE(e)
  195 
  196 #else
  197 void tlb_broadcast(int);
  198 
  199 #if defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT)
  200 #define TLB_BROADCAST   /* No need to explicitly send an IPI */
  201 #else
  202 #define TLB_BROADCAST   tlb_broadcast(7)
  203 #endif
  204 
  205 #define cpu_tlb_flushID() do { \
  206         cpufuncs.cf_tlb_flushID(); \
  207         TLB_BROADCAST; \
  208 } while(0)
  209 
  210 #define cpu_tlb_flushID_SE(e) do { \
  211         cpufuncs.cf_tlb_flushID_SE(e); \
  212         TLB_BROADCAST; \
  213 } while(0)
  214 
  215 
  216 #define cpu_tlb_flushI() do { \
  217         cpufuncs.cf_tlb_flushI(); \
  218         TLB_BROADCAST; \
  219 } while(0)
  220 
  221 
  222 #define cpu_tlb_flushI_SE(e) do { \
  223         cpufuncs.cf_tlb_flushI_SE(e); \
  224         TLB_BROADCAST; \
  225 } while(0)
  226 
  227 
  228 #define cpu_tlb_flushD() do { \
  229         cpufuncs.cf_tlb_flushD(); \
  230         TLB_BROADCAST; \
  231 } while(0)
  232 
  233 
  234 #define cpu_tlb_flushD_SE(e) do { \
  235         cpufuncs.cf_tlb_flushD_SE(e); \
  236         TLB_BROADCAST; \
  237 } while(0)
  238 
  239 #endif
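
/*
 * Editor's illustrative sketch (not part of the original header): how the
 * TLB-flush macros above are typically used after a page-table update.
 * The helper name, the 'pte' parameter, and the exact barrier sequence are
 * assumptions for illustration only.
 */
#if 0   /* sketch only -- not compiled */
static void
example_update_mapping(volatile u_int *pte, u_int newpte, u_int va)
{

        *pte = newpte;                  /* install the new translation */
        cpu_drain_writebuf();           /* make the table write visible */
        cpu_tlb_flushID_SE(va);         /* drop the stale entry for 'va';
                                           under SMP this also expands to
                                           TLB_BROADCAST (see above) */
        cpu_cpwait();                   /* wait for the cp15 operation */
}
#endif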
  240 
  241 #define cpu_icache_sync_all()   cpufuncs.cf_icache_sync_all()
  242 #define cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))
  243 
  244 #define cpu_dcache_wbinv_all()  cpufuncs.cf_dcache_wbinv_all()
  245 #define cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
  246 #define cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
  247 #define cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))
  248 
  249 #define cpu_idcache_inv_all()   cpufuncs.cf_idcache_inv_all()
  250 #define cpu_idcache_wbinv_all() cpufuncs.cf_idcache_wbinv_all()
  251 #define cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))
  252 #define cpu_l2cache_wbinv_all() cpufuncs.cf_l2cache_wbinv_all()
  253 #define cpu_l2cache_wb_range(a, s) cpufuncs.cf_l2cache_wb_range((a), (s))
  254 #define cpu_l2cache_inv_range(a, s) cpufuncs.cf_l2cache_inv_range((a), (s))
  255 #define cpu_l2cache_wbinv_range(a, s) cpufuncs.cf_l2cache_wbinv_range((a), (s))
  256 #define cpu_l2cache_drain_writebuf() cpufuncs.cf_l2cache_drain_writebuf()
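
/*
 * Editor's illustrative sketch (not part of the original header): typical
 * cache maintenance built from the cpu_* primitives above, e.g. around a
 * device DMA transfer and after storing new instructions.  The function
 * name and the DMA scenario are assumptions for illustration only.
 */
#if 0   /* sketch only -- not compiled */
static void
example_cache_maintenance(vm_offset_t buf, vm_size_t len,
    vm_offset_t code, vm_size_t codelen)
{

        /* CPU -> device: write dirty lines back before the device reads. */
        cpu_dcache_wb_range(buf, len);
        cpu_l2cache_wb_range(buf, len);
        cpu_drain_writebuf();

        /* ... device DMAs new data into 'buf' ... */

        /* Device -> CPU: discard stale lines before the CPU reads. */
        cpu_l2cache_inv_range(buf, len);
        cpu_dcache_inv_range(buf, len);

        /*
         * After storing instructions at 'code', synchronize the I-cache
         * for that range; per the rules documented in struct cpu_functions,
         * this may also have to write back dirty D-cache lines first.
         */
        cpu_icache_sync_range(code, codelen);
}
#endif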
  257 
  258 #define cpu_flush_prefetchbuf() cpufuncs.cf_flush_prefetchbuf()
  259 #define cpu_drain_writebuf()    cpufuncs.cf_drain_writebuf()
  260 #define cpu_flush_brnchtgt_C()  cpufuncs.cf_flush_brnchtgt_C()
  261 #define cpu_flush_brnchtgt_E(e) cpufuncs.cf_flush_brnchtgt_E(e)
  262 
  263 #define cpu_sleep(m)            cpufuncs.cf_sleep(m)
  264 
  265 #define cpu_dataabt_fixup(a)            cpufuncs.cf_dataabt_fixup(a)
  266 #define cpu_prefetchabt_fixup(a)        cpufuncs.cf_prefetchabt_fixup(a)
  267 #define ABORT_FIXUP_OK          0       /* fixup succeeded */
  268 #define ABORT_FIXUP_FAILED      1       /* fixup failed */
  269 #define ABORT_FIXUP_RETURN      2       /* abort handler should return */
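
/*
 * Editor's illustrative sketch (not part of the original header): one way
 * an abort handler might interpret the cf_dataabt_fixup() return protocol
 * named by the ABORT_FIXUP_* constants above.  The handler shape and the
 * opaque trapframe argument are assumptions for illustration only.
 */
#if 0   /* sketch only -- not compiled */
static void
example_data_abort(void *tf)
{

        switch (cpu_dataabt_fixup(tf)) {
        case ABORT_FIXUP_RETURN:
                return;                 /* nothing more to do here */
        case ABORT_FIXUP_FAILED:
                /* e.g. deliver a signal or panic */
                break;
        case ABORT_FIXUP_OK:
        default:
                /* fixup applied; continue with normal fault handling */
                break;
        }
}
#endif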
  270 
  271 #define cpu_setup(a)                    cpufuncs.cf_setup(a)
  272 
  273 int     set_cpufuncs            (void);
  274 #define ARCHITECTURE_NOT_PRESENT        1       /* known but not configured */
  275 #define ARCHITECTURE_NOT_SUPPORTED      2       /* not known */
  276 
  277 void    cpufunc_nullop          (void);
  278 int     cpufunc_null_fixup      (void *);
  279 int     early_abort_fixup       (void *);
  280 int     late_abort_fixup        (void *);
  281 u_int   cpufunc_id              (void);
  282 u_int   cpufunc_cpuid           (void);
  283 u_int   cpufunc_control         (u_int clear, u_int bic);
  284 void    cpufunc_domains         (u_int domains);
  285 u_int   cpufunc_faultstatus     (void);
  286 u_int   cpufunc_faultaddress    (void);
  287 u_int   cpu_pfr                 (int);
  288 
  289 #if defined(CPU_FA526) || defined(CPU_FA626TE)
  290 void    fa526_setup             (char *arg);
  291 void    fa526_setttb            (u_int ttb);
  292 void    fa526_context_switch    (void);
  293 void    fa526_cpu_sleep         (int);
  294 void    fa526_tlb_flushI_SE     (u_int);
  295 void    fa526_tlb_flushID_SE    (u_int);
  296 void    fa526_flush_prefetchbuf (void);
  297 void    fa526_flush_brnchtgt_E  (u_int);
  298 
  299 void    fa526_icache_sync_all   (void);
  300 void    fa526_icache_sync_range(vm_offset_t start, vm_size_t end);
  301 void    fa526_dcache_wbinv_all  (void);
  302 void    fa526_dcache_wbinv_range(vm_offset_t start, vm_size_t end);
  303 void    fa526_dcache_inv_range  (vm_offset_t start, vm_size_t end);
  304 void    fa526_dcache_wb_range   (vm_offset_t start, vm_size_t end);
  305 void    fa526_idcache_wbinv_all(void);
  306 void    fa526_idcache_wbinv_range(vm_offset_t start, vm_size_t end);
  307 #endif
  308 
  309 
  310 #ifdef CPU_ARM9
  311 void    arm9_setttb             (u_int);
  312 
  313 void    arm9_tlb_flushID_SE     (u_int va);
  314 
  315 void    arm9_icache_sync_all    (void);
  316 void    arm9_icache_sync_range  (vm_offset_t, vm_size_t);
  317 
  318 void    arm9_dcache_wbinv_all   (void);
  319 void    arm9_dcache_wbinv_range (vm_offset_t, vm_size_t);
  320 void    arm9_dcache_inv_range   (vm_offset_t, vm_size_t);
  321 void    arm9_dcache_wb_range    (vm_offset_t, vm_size_t);
  322 
  323 void    arm9_idcache_wbinv_all  (void);
  324 void    arm9_idcache_wbinv_range (vm_offset_t, vm_size_t);
  325 
  326 void    arm9_context_switch     (void);
  327 
  328 void    arm9_setup              (char *string);
  329 
  330 extern unsigned arm9_dcache_sets_max;
  331 extern unsigned arm9_dcache_sets_inc;
  332 extern unsigned arm9_dcache_index_max;
  333 extern unsigned arm9_dcache_index_inc;
  334 #endif
  335 
  336 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
  337 void    arm10_setttb            (u_int);
  338 
  339 void    arm10_tlb_flushID_SE    (u_int);
  340 void    arm10_tlb_flushI_SE     (u_int);
  341 
  342 void    arm10_icache_sync_all   (void);
  343 void    arm10_icache_sync_range (vm_offset_t, vm_size_t);
  344 
  345 void    arm10_dcache_wbinv_all  (void);
  346 void    arm10_dcache_wbinv_range (vm_offset_t, vm_size_t);
  347 void    arm10_dcache_inv_range  (vm_offset_t, vm_size_t);
  348 void    arm10_dcache_wb_range   (vm_offset_t, vm_size_t);
  349 
  350 void    arm10_idcache_wbinv_all (void);
  351 void    arm10_idcache_wbinv_range (vm_offset_t, vm_size_t);
  352 
  353 void    arm10_context_switch    (void);
  354 
  355 void    arm10_setup             (char *string);
  356 
  357 extern unsigned arm10_dcache_sets_max;
  358 extern unsigned arm10_dcache_sets_inc;
  359 extern unsigned arm10_dcache_index_max;
  360 extern unsigned arm10_dcache_index_inc;
  361 
  362 u_int   sheeva_control_ext              (u_int, u_int);
  363 void    sheeva_cpu_sleep                (int);
  364 void    sheeva_setttb                   (u_int);
  365 void    sheeva_dcache_wbinv_range       (vm_offset_t, vm_size_t);
  366 void    sheeva_dcache_inv_range         (vm_offset_t, vm_size_t);
  367 void    sheeva_dcache_wb_range          (vm_offset_t, vm_size_t);
  368 void    sheeva_idcache_wbinv_range      (vm_offset_t, vm_size_t);
  369 
  370 void    sheeva_l2cache_wbinv_range      (vm_offset_t, vm_size_t);
  371 void    sheeva_l2cache_inv_range        (vm_offset_t, vm_size_t);
  372 void    sheeva_l2cache_wb_range         (vm_offset_t, vm_size_t);
  373 void    sheeva_l2cache_wbinv_all        (void);
  374 #endif
  375 
  376 #if defined(CPU_ARM1136) || defined(CPU_ARM1176) || \
  377         defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
  378 void    arm11_setttb            (u_int);
  379 void    arm11_sleep             (int);
  380 
  381 void    arm11_tlb_flushID_SE    (u_int);
  382 void    arm11_tlb_flushI_SE     (u_int);
  383 
  384 void    arm11_context_switch    (void);
  385 
  386 void    arm11_setup             (char *string);
  387 void    arm11_tlb_flushID       (void);
  388 void    arm11_tlb_flushI        (void);
  389 void    arm11_tlb_flushD        (void);
  390 void    arm11_tlb_flushD_SE     (u_int va);
  391 
  392 void    arm11_drain_writebuf    (void);
  393 
  394 void    pj4b_setttb                     (u_int);
  395 
  396 void    pj4b_drain_readbuf              (void);
  397 void    pj4b_flush_brnchtgt_all         (void);
  398 void    pj4b_flush_brnchtgt_va          (u_int);
  399 void    pj4b_sleep                      (int);
  400 
  401 void    armv6_icache_sync_all           (void);
  402 void    armv6_icache_sync_range         (vm_offset_t, vm_size_t);
  403 
  404 void    armv6_dcache_wbinv_all          (void);
  405 void    armv6_dcache_wbinv_range        (vm_offset_t, vm_size_t);
  406 void    armv6_dcache_inv_range          (vm_offset_t, vm_size_t);
  407 void    armv6_dcache_wb_range           (vm_offset_t, vm_size_t);
  408 
  409 void    armv6_idcache_inv_all           (void);
  410 void    armv6_idcache_wbinv_all         (void);
  411 void    armv6_idcache_wbinv_range       (vm_offset_t, vm_size_t);
  412 
  413 void    armv7_setttb                    (u_int);
  414 void    armv7_tlb_flushID               (void);
  415 void    armv7_tlb_flushID_SE            (u_int);
  416 void    armv7_icache_sync_all           (void);
  417 void    armv7_icache_sync_range         (vm_offset_t, vm_size_t);
  418 void    armv7_idcache_wbinv_range       (vm_offset_t, vm_size_t);
  419 void    armv7_idcache_inv_all           (void);
  420 void    armv7_dcache_wbinv_all          (void);
  421 void    armv7_idcache_wbinv_all         (void);
  422 void    armv7_dcache_wbinv_range        (vm_offset_t, vm_size_t);
  423 void    armv7_dcache_inv_range          (vm_offset_t, vm_size_t);
  424 void    armv7_dcache_wb_range           (vm_offset_t, vm_size_t);
  425 void    armv7_cpu_sleep                 (int);
  426 void    armv7_setup                     (char *string);
  427 void    armv7_context_switch            (void);
  428 void    armv7_drain_writebuf            (void);
  429 void    armv7_sev                       (void);
  430 void    armv7_sleep                     (int unused);
  431 u_int   armv7_auxctrl                   (u_int, u_int);
  432 void    pj4bv7_setup                    (char *string);
  433 void    pj4b_config                     (void);
  434 
  435 int     get_core_id                     (void);
  436 
  437 void    armadaxp_idcache_wbinv_all      (void);
  438 
  439 void    cortexa_setup                   (char *);
  440 #endif
  441 
  442 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
  443 void    arm11x6_setttb                  (u_int);
  444 void    arm11x6_idcache_wbinv_all       (void);
  445 void    arm11x6_dcache_wbinv_all        (void);
  446 void    arm11x6_icache_sync_all         (void);
  447 void    arm11x6_flush_prefetchbuf       (void);
  448 void    arm11x6_icache_sync_range       (vm_offset_t, vm_size_t);
  449 void    arm11x6_idcache_wbinv_range     (vm_offset_t, vm_size_t);
  450 void    arm11x6_setup                   (char *string);
  451 void    arm11x6_sleep                   (int);  /* no ref. for errata */
  452 #endif
  453 #if defined(CPU_ARM1136)
  454 void    arm1136_sleep_rev0              (int);  /* for errata 336501 */
  455 #endif
  456 
  457 #if defined(CPU_ARM9E) || defined (CPU_ARM10)
  458 void    armv5_ec_setttb(u_int);
  459 
  460 void    armv5_ec_icache_sync_all(void);
  461 void    armv5_ec_icache_sync_range(vm_offset_t, vm_size_t);
  462 
  463 void    armv5_ec_dcache_wbinv_all(void);
  464 void    armv5_ec_dcache_wbinv_range(vm_offset_t, vm_size_t);
  465 void    armv5_ec_dcache_inv_range(vm_offset_t, vm_size_t);
  466 void    armv5_ec_dcache_wb_range(vm_offset_t, vm_size_t);
  467 
  468 void    armv5_ec_idcache_wbinv_all(void);
  469 void    armv5_ec_idcache_wbinv_range(vm_offset_t, vm_size_t);
  470 #endif
  471 
  472 #if defined (CPU_ARM10)
  473 void    armv5_setttb(u_int);
  474 
  475 void    armv5_icache_sync_all(void);
  476 void    armv5_icache_sync_range(vm_offset_t, vm_size_t);
  477 
  478 void    armv5_dcache_wbinv_all(void);
  479 void    armv5_dcache_wbinv_range(vm_offset_t, vm_size_t);
  480 void    armv5_dcache_inv_range(vm_offset_t, vm_size_t);
  481 void    armv5_dcache_wb_range(vm_offset_t, vm_size_t);
  482 
  483 void    armv5_idcache_wbinv_all(void);
  484 void    armv5_idcache_wbinv_range(vm_offset_t, vm_size_t);
  485 
  486 extern unsigned armv5_dcache_sets_max;
  487 extern unsigned armv5_dcache_sets_inc;
  488 extern unsigned armv5_dcache_index_max;
  489 extern unsigned armv5_dcache_index_inc;
  490 #endif
  491 
  492 #if defined(CPU_ARM9) || defined(CPU_ARM9E) || defined(CPU_ARM10) ||    \
  493   defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||             \
  494   defined(CPU_FA526) || defined(CPU_FA626TE) ||                         \
  495   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
  496   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
  497 
  498 void    armv4_tlb_flushID       (void);
  499 void    armv4_tlb_flushI        (void);
  500 void    armv4_tlb_flushD        (void);
  501 void    armv4_tlb_flushD_SE     (u_int va);
  502 
  503 void    armv4_drain_writebuf    (void);
  504 void    armv4_idcache_inv_all   (void);
  505 #endif
  506 
  507 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||   \
  508   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||   \
  509   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
  510 void    xscale_cpwait           (void);
  511 
  512 void    xscale_cpu_sleep        (int mode);
  513 
  514 u_int   xscale_control          (u_int clear, u_int bic);
  515 
  516 void    xscale_setttb           (u_int ttb);
  517 
  518 void    xscale_tlb_flushID_SE   (u_int va);
  519 
  520 void    xscale_cache_flushID    (void);
  521 void    xscale_cache_flushI     (void);
  522 void    xscale_cache_flushD     (void);
  523 void    xscale_cache_flushD_SE  (u_int entry);
  524 
  525 void    xscale_cache_cleanID    (void);
  526 void    xscale_cache_cleanD     (void);
  527 void    xscale_cache_cleanD_E   (u_int entry);
  528 
  529 void    xscale_cache_clean_minidata (void);
  530 
  531 void    xscale_cache_purgeID    (void);
  532 void    xscale_cache_purgeID_E  (u_int entry);
  533 void    xscale_cache_purgeD     (void);
  534 void    xscale_cache_purgeD_E   (u_int entry);
  535 
  536 void    xscale_cache_syncI      (void);
  537 void    xscale_cache_cleanID_rng (vm_offset_t start, vm_size_t end);
  538 void    xscale_cache_cleanD_rng (vm_offset_t start, vm_size_t end);
  539 void    xscale_cache_purgeID_rng (vm_offset_t start, vm_size_t end);
  540 void    xscale_cache_purgeD_rng (vm_offset_t start, vm_size_t end);
  541 void    xscale_cache_syncI_rng  (vm_offset_t start, vm_size_t end);
  542 void    xscale_cache_flushD_rng (vm_offset_t start, vm_size_t end);
  543 
  544 void    xscale_context_switch   (void);
  545 
  546 void    xscale_setup            (char *string);
  547 #endif  /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 ||
  548            CPU_XSCALE_IXP425 || CPU_XSCALE_80219 || CPU_XSCALE_81342 */
  549 
  550 #ifdef  CPU_XSCALE_81342
  551 
  552 void    xscalec3_l2cache_purge  (void);
  553 void    xscalec3_cache_purgeID  (void);
  554 void    xscalec3_cache_purgeD   (void);
  555 void    xscalec3_cache_cleanID  (void);
  556 void    xscalec3_cache_cleanD   (void);
  557 void    xscalec3_cache_syncI    (void);
  558 
  559 void    xscalec3_cache_purgeID_rng      (vm_offset_t start, vm_size_t end);
  560 void    xscalec3_cache_purgeD_rng       (vm_offset_t start, vm_size_t end);
  561 void    xscalec3_cache_cleanID_rng      (vm_offset_t start, vm_size_t end);
  562 void    xscalec3_cache_cleanD_rng       (vm_offset_t start, vm_size_t end);
  563 void    xscalec3_cache_syncI_rng        (vm_offset_t start, vm_size_t end);
  564 
  565 void    xscalec3_l2cache_flush_rng      (vm_offset_t, vm_size_t);
  566 void    xscalec3_l2cache_clean_rng      (vm_offset_t start, vm_size_t end);
  567 void    xscalec3_l2cache_purge_rng      (vm_offset_t start, vm_size_t end);
  568 
  569 
  570 void    xscalec3_setttb         (u_int ttb);
  571 void    xscalec3_context_switch (void);
  572 
  573 #endif /* CPU_XSCALE_81342 */
  574 
  575 #define setttb          cpu_setttb
  576 #define drain_writebuf  cpu_drain_writebuf
  577 
  578 /*
  579  * Macros for manipulating CPU interrupts
  580  */
  581 static __inline u_int32_t __set_cpsr_c(u_int bic, u_int eor) __attribute__((__unused__));
  582 
  583 static __inline u_int32_t
  584 __set_cpsr_c(u_int bic, u_int eor)
  585 {
  586         u_int32_t       tmp, ret;
  587 
  588         __asm __volatile(
  589                 "mrs     %0, cpsr\n"    /* Get the CPSR */
  590                 "bic     %1, %0, %2\n"  /* Clear bits */
  591                 "eor     %1, %1, %3\n"  /* XOR bits */
  592                 "msr     cpsr_c, %1\n"  /* Set the control field of CPSR */
  593         : "=&r" (ret), "=&r" (tmp)
  594         : "r" (bic), "r" (eor) : "memory");
  595 
  596         return ret;
  597 }
  598 
  599 #define ARM_CPSR_F32    (1 << 6)        /* FIQ disable */
  600 #define ARM_CPSR_I32    (1 << 7)        /* IRQ disable */
  601 
  602 #define disable_interrupts(mask)                                        \
  603         (__set_cpsr_c((mask) & (ARM_CPSR_I32 | ARM_CPSR_F32),           \
  604                       (mask) & (ARM_CPSR_I32 | ARM_CPSR_F32)))
  605 
  606 #define enable_interrupts(mask)                                         \
  607         (__set_cpsr_c((mask) & (ARM_CPSR_I32 | ARM_CPSR_F32), 0))
  608 
  609 #define restore_interrupts(old_cpsr)                                    \
  610         (__set_cpsr_c((ARM_CPSR_I32 | ARM_CPSR_F32),                    \
  611                       (old_cpsr) & (ARM_CPSR_I32 | ARM_CPSR_F32)))
  612 
  613 static __inline register_t
  614 intr_disable(void)
  615 {
  616         register_t s;
  617 
  618         s = disable_interrupts(ARM_CPSR_I32 | ARM_CPSR_F32);
  619         return (s);
  620 }
  621 
  622 static __inline void
  623 intr_restore(register_t s)
  624 {
  625 
  626         restore_interrupts(s);
  627 }
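
/*
 * Editor's illustrative sketch (not part of the original header): a short
 * critical section built on the helpers above.  intr_disable() masks IRQ
 * and FIQ via __set_cpsr_c() and returns the previous CPSR, so nesting
 * works; intr_restore() puts the saved I/F bits back.  The function name
 * and the protected work are assumptions for illustration only.
 */
#if 0   /* sketch only -- not compiled */
static void
example_critical_section(void)
{
        register_t s;

        s = intr_disable();             /* mask IRQ/FIQ, remember old state */
        /* ... touch data shared with an interrupt handler ... */
        intr_restore(s);                /* restore the previous I/F bits */
}
#endif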
  628 
  629 /* Functions to manipulate the CPSR. */
  630 u_int   SetCPSR(u_int bic, u_int eor);
  631 u_int   GetCPSR(void);
  632 
  633 /*
  634  * Functions to manipulate cpu r13
  635  * (in arm/arm32/setstack.S)
  636  */
  637 
  638 void set_stackptr       (u_int mode, u_int address);
  639 u_int get_stackptr      (u_int mode);
  640 
  641 /*
  642  * Miscellany
  643  */
  644 
  645 int get_pc_str_offset   (void);
  646 
  647 /*
  648  * CPU functions from locore.S
  649  */
  650 
  651 void cpu_reset          (void) __attribute__((__noreturn__));
  652 
  653 /*
  654  * Cache info variables.
  655  */
  656 
  657 /* PRIMARY CACHE VARIABLES */
  658 extern int      arm_picache_size;
  659 extern int      arm_picache_line_size;
  660 extern int      arm_picache_ways;
  661 
  662 extern int      arm_pdcache_size;       /* and unified */
  663 extern int      arm_pdcache_line_size;
  664 extern int      arm_pdcache_ways;
  665 
  666 extern int      arm_pcache_type;
  667 extern int      arm_pcache_unified;
  668 
  669 extern int      arm_dcache_align;
  670 extern int      arm_dcache_align_mask;
  671 
  672 extern u_int    arm_cache_level;
  673 extern u_int    arm_cache_loc;
  674 extern u_int    arm_cache_type[14];
  675 
  676 #endif  /* _KERNEL */
  677 #endif  /* _MACHINE_CPUFUNC_H_ */
  678 
  679 /* End of cpufunc.h */





This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.