FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/cpufunc.c


    1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
    2 
    3 /*-
    4  * arm9 support code Copyright (C) 2001 ARM Ltd
    5  * Copyright (c) 1997 Mark Brinicombe.
    6  * Copyright (c) 1997 Causality Limited
    7  * All rights reserved.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *      This product includes software developed by Causality Limited.
   20  * 4. The name of Causality Limited may not be used to endorse or promote
   21  *    products derived from this software without specific prior written
   22  *    permission.
   23  *
   24  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
   25  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   26  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   27  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
   28  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   29  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   30  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   34  * SUCH DAMAGE.
   35  *
   36  * RiscBSD kernel project
   37  *
   38  * cpufuncs.c
   39  *
   40  * C functions for supporting CPU / MMU / TLB specific operations.
   41  *
   42  * Created      : 30/01/97
   43  */
   44 #include <sys/cdefs.h>
   45 __FBSDID("$FreeBSD: releng/10.2/sys/arm/arm/cpufunc.c 283335 2015-05-23 22:48:54Z ian $");
   46 
   47 #include <sys/param.h>
   48 #include <sys/systm.h>
   49 #include <sys/lock.h>
   50 #include <sys/mutex.h>
   51 #include <sys/bus.h>
   52 #include <machine/bus.h>
   53 #include <machine/cpu.h>
   54 #include <machine/disassem.h>
   55 
   56 #include <vm/vm.h>
   57 #include <vm/pmap.h>
   58 #include <vm/uma.h>
   59 
   60 #include <machine/cpuconf.h>
   61 #include <machine/cpufunc.h>
   62 #include <machine/bootconfig.h>
   63 
   64 #ifdef CPU_XSCALE_80200
   65 #include <arm/xscale/i80200/i80200reg.h>
   66 #include <arm/xscale/i80200/i80200var.h>
   67 #endif
   68 
   69 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
   70 #include <arm/xscale/i80321/i80321reg.h>
   71 #include <arm/xscale/i80321/i80321var.h>
   72 #endif
   73 
   74 /*
   75  * Some definitions in i81342reg.h clash with i80321reg.h.
   76  * This only happens for the LINT kernel. As it happens,
   77  * we don't need anything from i81342reg.h that we already
   78  * got from somewhere else during a LINT compile.
   79  */
   80 #if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
   81 #include <arm/xscale/i8134x/i81342reg.h>
   82 #endif
   83 
   84 #ifdef CPU_XSCALE_IXP425
   85 #include <arm/xscale/ixp425/ixp425reg.h>
   86 #include <arm/xscale/ixp425/ixp425var.h>
   87 #endif
   88 
   89 /* PRIMARY CACHE VARIABLES */
   90 int     arm_picache_size;
   91 int     arm_picache_line_size;
   92 int     arm_picache_ways;
   93 
   94 int     arm_pdcache_size;       /* and unified */
   95 int     arm_pdcache_line_size;
   96 int     arm_pdcache_ways;
   97 
   98 int     arm_pcache_type;
   99 int     arm_pcache_unified;
  100 
  101 int     arm_dcache_align;
  102 int     arm_dcache_align_mask;
  103 
  104 u_int   arm_cache_level;
  105 u_int   arm_cache_type[14];
  106 u_int   arm_cache_loc;
  107 
  108 /* 1 == use cpu_sleep(), 0 == don't */
  109 int cpu_do_powersave;
  110 int ctrl;
  111 
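       /*
        * A minimal usage sketch (not code from this file): arm_dcache_align is
        * the D-cache line size in bytes and arm_dcache_align_mask is
        * arm_dcache_align - 1, so callers can round a buffer out to whole
        * cache lines before a writeback/invalidate, e.g.
        *
        *      vm_offset_t start = addr & ~arm_dcache_align_mask;
        *      vm_offset_t end = (addr + len + arm_dcache_align_mask) &
        *          ~arm_dcache_align_mask;
        *
        * The same mask is handed to uma_set_align() in set_cpufuncs() so UMA
        * objects can be cache-line aligned.
        */
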
  112 #ifdef CPU_ARM9
  113 struct cpu_functions arm9_cpufuncs = {
  114         /* CPU functions */
  115 
  116         cpufunc_id,                     /* id                   */
  117         cpufunc_nullop,                 /* cpwait               */
  118 
  119         /* MMU functions */
  120 
  121         cpufunc_control,                /* control              */
  122         cpufunc_domains,                /* Domain               */
  123         arm9_setttb,                    /* Setttb               */
  124         cpufunc_faultstatus,            /* Faultstatus          */
  125         cpufunc_faultaddress,           /* Faultaddress         */
  126 
  127         /* TLB functions */
  128 
  129         armv4_tlb_flushID,              /* tlb_flushID          */
  130         arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
  131         armv4_tlb_flushI,               /* tlb_flushI           */
  132         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  133         armv4_tlb_flushD,               /* tlb_flushD           */
  134         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  135 
  136         /* Cache operations */
  137 
  138         arm9_icache_sync_all,           /* icache_sync_all      */
  139         arm9_icache_sync_range,         /* icache_sync_range    */
  140 
  141         arm9_dcache_wbinv_all,          /* dcache_wbinv_all     */
  142         arm9_dcache_wbinv_range,        /* dcache_wbinv_range   */
  143         arm9_dcache_inv_range,          /* dcache_inv_range     */
  144         arm9_dcache_wb_range,           /* dcache_wb_range      */
  145 
  146         armv4_idcache_inv_all,          /* idcache_inv_all      */
  147         arm9_idcache_wbinv_all,         /* idcache_wbinv_all    */
  148         arm9_idcache_wbinv_range,       /* idcache_wbinv_range  */
  149         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  150         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  151         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  152         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  153         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
  154 
  155         /* Other functions */
  156 
  157         cpufunc_nullop,                 /* flush_prefetchbuf    */
  158         armv4_drain_writebuf,           /* drain_writebuf       */
  159         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  160         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  161 
  162         (void *)cpufunc_nullop,         /* sleep                */
  163 
  164         /* Soft functions */
  165 
  166         cpufunc_null_fixup,             /* dataabt_fixup        */
  167         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  168 
  169         arm9_context_switch,            /* context_switch       */
  170 
  171         arm9_setup                      /* cpu setup            */
  172 
  173 };
  174 #endif /* CPU_ARM9 */
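
       /*
        * Each struct cpu_functions above and below is a per-core dispatch
        * table.  set_cpufuncs() copies the table matching the detected CPU
        * into the global 'cpufuncs', and the rest of the kernel calls through
        * it.  A minimal sketch, assuming the usual cpu_*() wrapper macros
        * from <machine/cpufunc.h> that expand to cpufuncs.cf_* calls:
        *
        *      cpu_dcache_wbinv_range((vm_offset_t)buf, len);
        *      --> cpufuncs.cf_dcache_wbinv_range((vm_offset_t)buf, len);
        *
        * so arm9, sheeva, xscale and armv7 cores all share the same call
        * sites.
        */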
  175 
  176 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
  177 struct cpu_functions armv5_ec_cpufuncs = {
  178         /* CPU functions */
  179 
  180         cpufunc_id,                     /* id                   */
  181         cpufunc_nullop,                 /* cpwait               */
  182 
  183         /* MMU functions */
  184 
  185         cpufunc_control,                /* control              */
  186         cpufunc_domains,                /* Domain               */
  187         armv5_ec_setttb,                /* Setttb               */
  188         cpufunc_faultstatus,            /* Faultstatus          */
  189         cpufunc_faultaddress,           /* Faultaddress         */
  190 
  191         /* TLB functions */
  192 
  193         armv4_tlb_flushID,              /* tlb_flushID          */
  194         arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
  195         armv4_tlb_flushI,               /* tlb_flushI           */
  196         arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
  197         armv4_tlb_flushD,               /* tlb_flushD           */
  198         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  199 
  200         /* Cache operations */
  201 
  202         armv5_ec_icache_sync_all,       /* icache_sync_all      */
  203         armv5_ec_icache_sync_range,     /* icache_sync_range    */
  204 
  205         armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
  206         armv5_ec_dcache_wbinv_range,    /* dcache_wbinv_range   */
  207         armv5_ec_dcache_inv_range,      /* dcache_inv_range     */
  208         armv5_ec_dcache_wb_range,       /* dcache_wb_range      */
  209 
  210         armv4_idcache_inv_all,          /* idcache_inv_all      */
  211         armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
  212         armv5_ec_idcache_wbinv_range,   /* idcache_wbinv_range  */
  213 
  214         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  215         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  216         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  217         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  218         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
  219 
  220         /* Other functions */
  221 
  222         cpufunc_nullop,                 /* flush_prefetchbuf    */
  223         armv4_drain_writebuf,           /* drain_writebuf       */
  224         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  225         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  226 
  227         (void *)cpufunc_nullop,         /* sleep                */
  228 
  229         /* Soft functions */
  230 
  231         cpufunc_null_fixup,             /* dataabt_fixup        */
  232         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  233 
  234         arm10_context_switch,           /* context_switch       */
  235 
  236         arm10_setup                     /* cpu setup            */
  237 
  238 };
  239 
  240 struct cpu_functions sheeva_cpufuncs = {
  241         /* CPU functions */
  242 
  243         cpufunc_id,                     /* id                   */
  244         cpufunc_nullop,                 /* cpwait               */
  245 
  246         /* MMU functions */
  247 
  248         cpufunc_control,                /* control              */
  249         cpufunc_domains,                /* Domain               */
  250         sheeva_setttb,                  /* Setttb               */
  251         cpufunc_faultstatus,            /* Faultstatus          */
  252         cpufunc_faultaddress,           /* Faultaddress         */
  253 
  254         /* TLB functions */
  255 
  256         armv4_tlb_flushID,              /* tlb_flushID          */
  257         arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
  258         armv4_tlb_flushI,               /* tlb_flushI           */
  259         arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
  260         armv4_tlb_flushD,               /* tlb_flushD           */
  261         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  262 
  263         /* Cache operations */
  264 
  265         armv5_ec_icache_sync_all,       /* icache_sync_all      */
  266         armv5_ec_icache_sync_range,     /* icache_sync_range    */
  267 
  268         armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
  269         sheeva_dcache_wbinv_range,      /* dcache_wbinv_range   */
  270         sheeva_dcache_inv_range,        /* dcache_inv_range     */
  271         sheeva_dcache_wb_range,         /* dcache_wb_range      */
  272 
  273         armv4_idcache_inv_all,          /* idcache_inv_all      */
  274         armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
   275         sheeva_idcache_wbinv_range,     /* idcache_wbinv_range  */
  276 
  277         sheeva_l2cache_wbinv_all,       /* l2cache_wbinv_all    */
  278         sheeva_l2cache_wbinv_range,     /* l2cache_wbinv_range  */
  279         sheeva_l2cache_inv_range,       /* l2cache_inv_range    */
  280         sheeva_l2cache_wb_range,        /* l2cache_wb_range     */
  281         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
  282 
  283         /* Other functions */
  284 
  285         cpufunc_nullop,                 /* flush_prefetchbuf    */
  286         armv4_drain_writebuf,           /* drain_writebuf       */
  287         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  288         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  289 
  290         sheeva_cpu_sleep,               /* sleep                */
  291 
  292         /* Soft functions */
  293 
  294         cpufunc_null_fixup,             /* dataabt_fixup        */
  295         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  296 
  297         arm10_context_switch,           /* context_switch       */
  298 
  299         arm10_setup                     /* cpu setup            */
  300 };
  301 #endif /* CPU_ARM9E || CPU_ARM10 */
  302 
  303 #ifdef CPU_ARM10
  304 struct cpu_functions arm10_cpufuncs = {
  305         /* CPU functions */
  306 
  307         cpufunc_id,                     /* id                   */
  308         cpufunc_nullop,                 /* cpwait               */
  309 
  310         /* MMU functions */
  311 
  312         cpufunc_control,                /* control              */
  313         cpufunc_domains,                /* Domain               */
  314         arm10_setttb,                   /* Setttb               */
  315         cpufunc_faultstatus,            /* Faultstatus          */
  316         cpufunc_faultaddress,           /* Faultaddress         */
  317 
  318         /* TLB functions */
  319 
  320         armv4_tlb_flushID,              /* tlb_flushID          */
  321         arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
  322         armv4_tlb_flushI,               /* tlb_flushI           */
  323         arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
  324         armv4_tlb_flushD,               /* tlb_flushD           */
  325         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  326 
  327         /* Cache operations */
  328 
  329         arm10_icache_sync_all,          /* icache_sync_all      */
  330         arm10_icache_sync_range,        /* icache_sync_range    */
  331 
  332         arm10_dcache_wbinv_all,         /* dcache_wbinv_all     */
  333         arm10_dcache_wbinv_range,       /* dcache_wbinv_range   */
  334         arm10_dcache_inv_range,         /* dcache_inv_range     */
  335         arm10_dcache_wb_range,          /* dcache_wb_range      */
  336 
  337         armv4_idcache_inv_all,          /* idcache_inv_all      */
  338         arm10_idcache_wbinv_all,        /* idcache_wbinv_all    */
  339         arm10_idcache_wbinv_range,      /* idcache_wbinv_range  */
  340         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  341         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  342         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  343         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  344         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
  345 
  346         /* Other functions */
  347 
  348         cpufunc_nullop,                 /* flush_prefetchbuf    */
  349         armv4_drain_writebuf,           /* drain_writebuf       */
  350         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  351         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  352 
  353         (void *)cpufunc_nullop,         /* sleep                */
  354 
  355         /* Soft functions */
  356 
  357         cpufunc_null_fixup,             /* dataabt_fixup        */
  358         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  359 
  360         arm10_context_switch,           /* context_switch       */
  361 
  362         arm10_setup                     /* cpu setup            */
  363 
  364 };
  365 #endif /* CPU_ARM10 */
  366 
  367 #ifdef CPU_MV_PJ4B
  368 struct cpu_functions pj4bv7_cpufuncs = {
  369         /* CPU functions */
  370 
  371         cpufunc_id,                     /* id                   */
  372         arm11_drain_writebuf,           /* cpwait               */
  373 
  374         /* MMU functions */
  375 
  376         cpufunc_control,                /* control              */
  377         cpufunc_domains,                /* Domain               */
  378         pj4b_setttb,                    /* Setttb               */
  379         cpufunc_faultstatus,            /* Faultstatus          */
  380         cpufunc_faultaddress,           /* Faultaddress         */
  381 
  382         /* TLB functions */
  383 
  384         armv7_tlb_flushID,              /* tlb_flushID          */
  385         armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
  386         armv7_tlb_flushID,              /* tlb_flushI           */
  387         armv7_tlb_flushID_SE,           /* tlb_flushI_SE        */
  388         armv7_tlb_flushID,              /* tlb_flushD           */
  389         armv7_tlb_flushID_SE,           /* tlb_flushD_SE        */
  390 
  391         /* Cache operations */
  392         armv7_idcache_wbinv_all,        /* icache_sync_all      */
  393         armv7_icache_sync_range,        /* icache_sync_range    */
  394 
  395         armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
  396         armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
  397         armv7_dcache_inv_range,         /* dcache_inv_range     */
  398         armv7_dcache_wb_range,          /* dcache_wb_range      */
  399 
  400         armv7_idcache_inv_all,          /* idcache_inv_all      */
  401         armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
   402         armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */
  403 
  404         (void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
  405         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  406         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  407         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  408         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
  409 
  410         /* Other functions */
  411 
  412         pj4b_drain_readbuf,             /* flush_prefetchbuf    */
  413         arm11_drain_writebuf,           /* drain_writebuf       */
  414         pj4b_flush_brnchtgt_all,        /* flush_brnchtgt_C     */
  415         pj4b_flush_brnchtgt_va,         /* flush_brnchtgt_E     */
  416 
  417         (void *)cpufunc_nullop,         /* sleep                */
  418 
  419         /* Soft functions */
  420 
  421         cpufunc_null_fixup,             /* dataabt_fixup        */
  422         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  423 
  424         arm11_context_switch,           /* context_switch       */
  425 
  426         pj4bv7_setup                    /* cpu setup            */
  427 };
  428 #endif /* CPU_MV_PJ4B */
  429 
  430 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
  431   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  432   defined(CPU_XSCALE_80219)
  433 
  434 struct cpu_functions xscale_cpufuncs = {
  435         /* CPU functions */
  436         
  437         cpufunc_id,                     /* id                   */
  438         xscale_cpwait,                  /* cpwait               */
  439 
  440         /* MMU functions */
  441 
  442         xscale_control,                 /* control              */
  443         cpufunc_domains,                /* domain               */
  444         xscale_setttb,                  /* setttb               */
  445         cpufunc_faultstatus,            /* faultstatus          */
  446         cpufunc_faultaddress,           /* faultaddress         */
  447 
  448         /* TLB functions */
  449 
  450         armv4_tlb_flushID,              /* tlb_flushID          */
  451         xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
  452         armv4_tlb_flushI,               /* tlb_flushI           */
  453         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  454         armv4_tlb_flushD,               /* tlb_flushD           */
  455         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  456 
  457         /* Cache operations */
  458 
  459         xscale_cache_syncI,             /* icache_sync_all      */
  460         xscale_cache_syncI_rng,         /* icache_sync_range    */
  461 
  462         xscale_cache_purgeD,            /* dcache_wbinv_all     */
  463         xscale_cache_purgeD_rng,        /* dcache_wbinv_range   */
  464         xscale_cache_flushD_rng,        /* dcache_inv_range     */
  465         xscale_cache_cleanD_rng,        /* dcache_wb_range      */
  466 
  467         xscale_cache_flushID,           /* idcache_inv_all      */
  468         xscale_cache_purgeID,           /* idcache_wbinv_all    */
  469         xscale_cache_purgeID_rng,       /* idcache_wbinv_range  */
  470         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  471         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  472         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  473         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  474         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
  475 
  476         /* Other functions */
  477 
  478         cpufunc_nullop,                 /* flush_prefetchbuf    */
  479         armv4_drain_writebuf,           /* drain_writebuf       */
  480         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  481         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  482 
  483         xscale_cpu_sleep,               /* sleep                */
  484 
  485         /* Soft functions */
  486 
  487         cpufunc_null_fixup,             /* dataabt_fixup        */
  488         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  489 
  490         xscale_context_switch,          /* context_switch       */
  491 
  492         xscale_setup                    /* cpu setup            */
  493 };
  494 #endif
   495 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 ||
   496    CPU_XSCALE_80219 */
  497 
  498 #ifdef CPU_XSCALE_81342
  499 struct cpu_functions xscalec3_cpufuncs = {
  500         /* CPU functions */
  501         
  502         cpufunc_id,                     /* id                   */
  503         xscale_cpwait,                  /* cpwait               */
  504 
  505         /* MMU functions */
  506 
  507         xscale_control,                 /* control              */
  508         cpufunc_domains,                /* domain               */
  509         xscalec3_setttb,                /* setttb               */
  510         cpufunc_faultstatus,            /* faultstatus          */
  511         cpufunc_faultaddress,           /* faultaddress         */
  512 
  513         /* TLB functions */
  514 
  515         armv4_tlb_flushID,              /* tlb_flushID          */
  516         xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
  517         armv4_tlb_flushI,               /* tlb_flushI           */
  518         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  519         armv4_tlb_flushD,               /* tlb_flushD           */
  520         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  521 
  522         /* Cache operations */
  523 
  524         xscalec3_cache_syncI,           /* icache_sync_all      */
  525         xscalec3_cache_syncI_rng,       /* icache_sync_range    */
  526 
  527         xscalec3_cache_purgeD,          /* dcache_wbinv_all     */
  528         xscalec3_cache_purgeD_rng,      /* dcache_wbinv_range   */
  529         xscale_cache_flushD_rng,        /* dcache_inv_range     */
  530         xscalec3_cache_cleanD_rng,      /* dcache_wb_range      */
  531 
  532         xscale_cache_flushID,           /* idcache_inv_all      */
  533         xscalec3_cache_purgeID,         /* idcache_wbinv_all    */
  534         xscalec3_cache_purgeID_rng,     /* idcache_wbinv_range  */
  535         xscalec3_l2cache_purge,         /* l2cache_wbinv_all    */
  536         xscalec3_l2cache_purge_rng,     /* l2cache_wbinv_range  */
  537         xscalec3_l2cache_flush_rng,     /* l2cache_inv_range    */
  538         xscalec3_l2cache_clean_rng,     /* l2cache_wb_range     */
  539         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
  540 
  541         /* Other functions */
  542 
  543         cpufunc_nullop,                 /* flush_prefetchbuf    */
  544         armv4_drain_writebuf,           /* drain_writebuf       */
  545         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  546         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  547 
  548         xscale_cpu_sleep,               /* sleep                */
  549 
  550         /* Soft functions */
  551 
  552         cpufunc_null_fixup,             /* dataabt_fixup        */
  553         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  554 
  555         xscalec3_context_switch,        /* context_switch       */
  556 
  557         xscale_setup                    /* cpu setup            */
  558 };
  559 #endif /* CPU_XSCALE_81342 */
  560 
  561 
  562 #if defined(CPU_FA526) || defined(CPU_FA626TE)
  563 struct cpu_functions fa526_cpufuncs = {
  564         /* CPU functions */
  565 
  566         cpufunc_id,                     /* id                   */
  567         cpufunc_nullop,                 /* cpwait               */
  568 
  569         /* MMU functions */
  570 
  571         cpufunc_control,                /* control              */
  572         cpufunc_domains,                /* domain               */
  573         fa526_setttb,                   /* setttb               */
  574         cpufunc_faultstatus,            /* faultstatus          */
  575         cpufunc_faultaddress,           /* faultaddress         */
  576 
  577         /* TLB functions */
  578 
  579         armv4_tlb_flushID,              /* tlb_flushID          */
  580         fa526_tlb_flushID_SE,           /* tlb_flushID_SE       */
  581         armv4_tlb_flushI,               /* tlb_flushI           */
  582         fa526_tlb_flushI_SE,            /* tlb_flushI_SE        */
  583         armv4_tlb_flushD,               /* tlb_flushD           */
  584         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  585 
  586         /* Cache operations */
  587 
  588         fa526_icache_sync_all,          /* icache_sync_all      */
  589         fa526_icache_sync_range,        /* icache_sync_range    */
  590 
  591         fa526_dcache_wbinv_all,         /* dcache_wbinv_all     */
  592         fa526_dcache_wbinv_range,       /* dcache_wbinv_range   */
  593         fa526_dcache_inv_range,         /* dcache_inv_range     */
  594         fa526_dcache_wb_range,          /* dcache_wb_range      */
  595 
  596         armv4_idcache_inv_all,          /* idcache_inv_all      */
  597         fa526_idcache_wbinv_all,        /* idcache_wbinv_all    */
  598         fa526_idcache_wbinv_range,      /* idcache_wbinv_range  */
  599         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  600         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  601         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  602         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  603         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
  604 
  605         /* Other functions */
  606 
  607         fa526_flush_prefetchbuf,        /* flush_prefetchbuf    */
  608         armv4_drain_writebuf,           /* drain_writebuf       */
  609         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  610         fa526_flush_brnchtgt_E,         /* flush_brnchtgt_E     */
  611 
  612         fa526_cpu_sleep,                /* sleep                */
  613 
  614         /* Soft functions */
  615 
  616         cpufunc_null_fixup,             /* dataabt_fixup        */
  617         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  618 
  619         fa526_context_switch,           /* context_switch       */
  620 
  621         fa526_setup                     /* cpu setup            */
  622 };
  623 #endif  /* CPU_FA526 || CPU_FA626TE */
  624 
  625 #if defined(CPU_ARM1136)
  626 struct cpu_functions arm1136_cpufuncs = {
  627         /* CPU functions */
  628         
  629         cpufunc_id,                     /* id                   */
  630         cpufunc_nullop,                 /* cpwait               */
  631         
  632         /* MMU functions */
  633         
  634         cpufunc_control,                /* control              */
  635         cpufunc_domains,                /* Domain               */
  636         arm11x6_setttb,                 /* Setttb               */
  637         cpufunc_faultstatus,            /* Faultstatus          */
  638         cpufunc_faultaddress,           /* Faultaddress         */
  639         
  640         /* TLB functions */
  641         
  642         arm11_tlb_flushID,              /* tlb_flushID          */
  643         arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
  644         arm11_tlb_flushI,               /* tlb_flushI           */
  645         arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
  646         arm11_tlb_flushD,               /* tlb_flushD           */
  647         arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */
  648         
  649         /* Cache operations */
  650         
  651         arm11x6_icache_sync_all,        /* icache_sync_all      */
  652         arm11x6_icache_sync_range,      /* icache_sync_range    */
  653         
  654         arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
  655         armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
  656         armv6_dcache_inv_range,         /* dcache_inv_range     */
  657         armv6_dcache_wb_range,          /* dcache_wb_range      */
  658         
  659         armv6_idcache_inv_all,          /* idcache_inv_all      */
  660         arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
  661         arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */
  662         
  663         (void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
  664         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  665         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  666         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  667         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
  668         
  669         /* Other functions */
  670         
  671         arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
  672         arm11_drain_writebuf,           /* drain_writebuf       */
  673         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  674         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  675         
  676         arm11_sleep,                    /* sleep                */
  677         
  678         /* Soft functions */
  679         
  680         cpufunc_null_fixup,             /* dataabt_fixup        */
  681         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  682         
  683         arm11_context_switch,           /* context_switch       */
  684         
  685         arm11x6_setup                   /* cpu setup            */
  686 };
  687 #endif /* CPU_ARM1136 */
  688 #if defined(CPU_ARM1176)
  689 struct cpu_functions arm1176_cpufuncs = {
  690         /* CPU functions */
  691         
  692         cpufunc_id,                     /* id                   */
  693         cpufunc_nullop,                 /* cpwait               */
  694         
  695         /* MMU functions */
  696         
  697         cpufunc_control,                /* control              */
  698         cpufunc_domains,                /* Domain               */
  699         arm11x6_setttb,                 /* Setttb               */
  700         cpufunc_faultstatus,            /* Faultstatus          */
  701         cpufunc_faultaddress,           /* Faultaddress         */
  702         
  703         /* TLB functions */
  704         
  705         arm11_tlb_flushID,              /* tlb_flushID          */
  706         arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
  707         arm11_tlb_flushI,               /* tlb_flushI           */
  708         arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
  709         arm11_tlb_flushD,               /* tlb_flushD           */
  710         arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */
  711         
  712         /* Cache operations */
  713         
  714         arm11x6_icache_sync_all,        /* icache_sync_all      */
  715         arm11x6_icache_sync_range,      /* icache_sync_range    */
  716         
  717         arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
  718         armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
  719         armv6_dcache_inv_range,         /* dcache_inv_range     */
  720         armv6_dcache_wb_range,          /* dcache_wb_range      */
  721         
  722         armv6_idcache_inv_all,          /* idcache_inv_all      */
  723         arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
  724         arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */
  725         
  726         (void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
  727         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  728         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  729         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  730         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
  731         
  732         /* Other functions */
  733         
  734         arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
  735         arm11_drain_writebuf,           /* drain_writebuf       */
  736         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  737         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  738         
  739         arm11x6_sleep,                  /* sleep                */
  740         
  741         /* Soft functions */
  742         
  743         cpufunc_null_fixup,             /* dataabt_fixup        */
  744         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  745         
  746         arm11_context_switch,           /* context_switch       */
  747         
  748         arm11x6_setup                   /* cpu setup            */
  749 };
   750 #endif /* CPU_ARM1176 */
  751 
  752 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
  753 struct cpu_functions cortexa_cpufuncs = {
  754         /* CPU functions */
  755         
  756         cpufunc_id,                     /* id                   */
  757         cpufunc_nullop,                 /* cpwait               */
  758         
  759         /* MMU functions */
  760         
  761         cpufunc_control,                /* control              */
  762         cpufunc_domains,                /* Domain               */
  763         armv7_setttb,                   /* Setttb               */
  764         cpufunc_faultstatus,            /* Faultstatus          */
  765         cpufunc_faultaddress,           /* Faultaddress         */
  766         
  767         /* 
  768          * TLB functions.  ARMv7 does all TLB ops based on a unified TLB model
  769          * whether the hardware implements separate I+D or not, so we use the
  770          * same 'ID' functions for all 3 variations.
  771          */
  772         
  773         armv7_tlb_flushID,              /* tlb_flushID          */
  774         armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
  775         armv7_tlb_flushID,              /* tlb_flushI           */
  776         armv7_tlb_flushID_SE,           /* tlb_flushI_SE        */
  777         armv7_tlb_flushID,              /* tlb_flushD           */
  778         armv7_tlb_flushID_SE,           /* tlb_flushD_SE        */
  779         
  780         /* Cache operations */
  781         
  782         armv7_icache_sync_all,          /* icache_sync_all      */
  783         armv7_icache_sync_range,        /* icache_sync_range    */
  784         
  785         armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
  786         armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
  787         armv7_dcache_inv_range,         /* dcache_inv_range     */
  788         armv7_dcache_wb_range,          /* dcache_wb_range      */
  789         
  790         armv7_idcache_inv_all,          /* idcache_inv_all      */
  791         armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
  792         armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */
  793         
  794         /* 
  795          * Note: For CPUs using the PL310 the L2 ops are filled in when the
  796          * L2 cache controller is actually enabled.
  797          */
  798         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  799         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  800         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  801         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  802         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
  803         
  804         /* Other functions */
  805         
  806         cpufunc_nullop,                 /* flush_prefetchbuf    */
  807         armv7_drain_writebuf,           /* drain_writebuf       */
  808         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  809         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  810         
  811         armv7_sleep,                    /* sleep                */
  812         
  813         /* Soft functions */
  814         
  815         cpufunc_null_fixup,             /* dataabt_fixup        */
  816         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  817         
  818         armv7_context_switch,           /* context_switch       */
  819         
   820         cortexa_setup                   /* cpu setup            */
  821 };
   822 #endif /* CPU_CORTEXA || CPU_KRAIT */
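
       /*
        * A hedged sketch of how the PL310 note in cortexa_cpufuncs plays out:
        * when an external L2 controller driver attaches it can overwrite the
        * nullop L2 entries in the already-selected 'cpufuncs' at run time, the
        * same way the i80200 errata handling in set_cpufuncs() replaces
        * cf_dcache_inv_range.  With assumed cf_-prefixed field names and
        * illustrative pl310_* handlers:
        *
        *      cpufuncs.cf_l2cache_wbinv_all = pl310_wbinv_all;
        *      cpufuncs.cf_l2cache_wbinv_range = pl310_wbinv_range;
        *
        * Until then the L2 slots are harmless no-ops.
        */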
  823 
  824 /*
  825  * Global constants also used by locore.s
  826  */
  827 
  828 struct cpu_functions cpufuncs;
  829 u_int cputype;
  830 u_int cpu_reset_needs_v4_MMU_disable;   /* flag used in locore.s */
  831 
  832 #if defined(CPU_ARM9) ||        \
  833   defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM1136) ||        \
  834   defined(CPU_ARM1176) || defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||             \
  835   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
  836   defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) ||                 \
  837   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
  838   defined(CPU_CORTEXA) || defined(CPU_KRAIT)
  839 
  840 /* Global cache line sizes, use 32 as default */
  841 int     arm_dcache_min_line_size = 32;
  842 int     arm_icache_min_line_size = 32;
  843 int     arm_idcache_min_line_size = 32;
  844 
  845 static void get_cachetype_cp15(void);
  846 
  847 /* Additional cache information local to this file.  Log2 of some of the
  848    above numbers.  */
  849 static int      arm_dcache_l2_nsets;
  850 static int      arm_dcache_l2_assoc;
  851 static int      arm_dcache_l2_linesize;
  852 
  853 static void
  854 get_cachetype_cp15()
  855 {
  856         u_int ctype, isize, dsize, cpuid;
  857         u_int clevel, csize, i, sel;
  858         u_int multiplier;
  859         u_char type;
  860 
  861         __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
  862                 : "=r" (ctype));
  863 
  864         cpuid = cpufunc_id();
  865         /*
  866          * ...and thus spake the ARM ARM:
  867          *
  868          * If an <opcode2> value corresponding to an unimplemented or
  869          * reserved ID register is encountered, the System Control
  870          * processor returns the value of the main ID register.
  871          */
  872         if (ctype == cpuid)
  873                 goto out;
  874 
  875         if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
  876                 /* Resolve minimal cache line sizes */
  877                 arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
  878                 arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
  879                 arm_idcache_min_line_size =
  880                     min(arm_icache_min_line_size, arm_dcache_min_line_size);
  881 
  882                 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
  883                     : "=r" (clevel));
  884                 arm_cache_level = clevel;
  885                 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
  886                 i = 0;
  887                 while ((type = (clevel & 0x7)) && i < 7) {
  888                         if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
  889                             type == CACHE_SEP_CACHE) {
  890                                 sel = i << 1;
  891                                 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
  892                                     : : "r" (sel));
  893                                 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
  894                                     : "=r" (csize));
  895                                 arm_cache_type[sel] = csize;
  896                                 arm_dcache_align = 1 << 
  897                                     (CPUV7_CT_xSIZE_LEN(csize) + 4);
  898                                 arm_dcache_align_mask = arm_dcache_align - 1;
  899                         }
  900                         if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
  901                                 sel = (i << 1) | 1;
  902                                 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
  903                                     : : "r" (sel));
  904                                 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
  905                                     : "=r" (csize));
  906                                 arm_cache_type[sel] = csize;
  907                         }
  908                         i++;
  909                         clevel >>= 3;
  910                 }
  911         } else {
  912                 if ((ctype & CPU_CT_S) == 0)
  913                         arm_pcache_unified = 1;
  914 
  915                 /*
  916                  * If you want to know how this code works, go read the ARM ARM.
  917                  */
  918 
  919                 arm_pcache_type = CPU_CT_CTYPE(ctype);
  920 
  921                 if (arm_pcache_unified == 0) {
  922                         isize = CPU_CT_ISIZE(ctype);
  923                         multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
  924                         arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
  925                         if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
  926                                 if (isize & CPU_CT_xSIZE_M)
  927                                         arm_picache_line_size = 0; /* not present */
  928                                 else
  929                                         arm_picache_ways = 1;
  930                         } else {
  931                                 arm_picache_ways = multiplier <<
  932                                     (CPU_CT_xSIZE_ASSOC(isize) - 1);
  933                         }
  934                         arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
  935                 }
  936 
  937                 dsize = CPU_CT_DSIZE(ctype);
  938                 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
  939                 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
  940                 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
  941                         if (dsize & CPU_CT_xSIZE_M)
  942                                 arm_pdcache_line_size = 0; /* not present */
  943                         else
  944                                 arm_pdcache_ways = 1;
  945                 } else {
  946                         arm_pdcache_ways = multiplier <<
  947                             (CPU_CT_xSIZE_ASSOC(dsize) - 1);
  948                 }
  949                 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
  950 
  951                 arm_dcache_align = arm_pdcache_line_size;
  952 
  953                 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
  954                 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
  955                 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
  956                     CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
  957 
  958         out:
  959                 arm_dcache_align_mask = arm_dcache_align - 1;
  960         }
  961 }
   962 #endif /* ARM9 || ARM9E || ARM10 || ARM11 || XSCALE || FA526 || PJ4B || CORTEXA || KRAIT */
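
       /*
        * A worked example of the ARMv7 decode above: the CTR DminLine/IminLine
        * fields hold log2(line size in words), so a DminLine value of 3 gives
        * arm_dcache_min_line_size = 1 << (3 + 2) = 32 bytes.  Likewise the
        * per-level CCSIDR LineSize field gives arm_dcache_align =
        * 1 << (LineSize + 4), so a LineSize of 1 also means 32-byte lines,
        * matching the 32-byte defaults used when these registers are not read.
        */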
  963 
  964 /*
  965  * Cannot panic here as we may not have a console yet ...
  966  */
  967 
  968 int
  969 set_cpufuncs()
  970 {
  971         cputype = cpufunc_id();
  972         cputype &= CPU_ID_CPU_MASK;
  973 
  974         /*
  975          * NOTE: cpu_do_powersave defaults to off.  If we encounter a
  976          * CPU type where we want to use it by default, then we set it.
  977          */
  978 
  979 #ifdef CPU_ARM9
  980         if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
  981              (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
  982             (cputype & 0x0000f000) == 0x00009000) {
  983                 cpufuncs = arm9_cpufuncs;
  984                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
  985                 get_cachetype_cp15();
  986                 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
  987                 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
  988                     arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
  989                 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
  990                 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
  991 #ifdef ARM9_CACHE_WRITE_THROUGH
  992                 pmap_pte_init_arm9();
  993 #else
  994                 pmap_pte_init_generic();
  995 #endif
  996                 goto out;
  997         }
  998 #endif /* CPU_ARM9 */
  999 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
 1000         if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
 1001             cputype == CPU_ID_MV88FR571_41) {
 1002                 uint32_t sheeva_ctrl;
 1003 
 1004                 sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
 1005                     MV_L2_ENABLE);
 1006                 /*
 1007                  * Workaround for Marvell MV78100 CPU: Cache prefetch
 1008                  * mechanism may affect the cache coherency validity,
 1009                  * so it needs to be disabled.
 1010                  *
 1011                  * Refer to errata document MV-S501058-00C.pdf (p. 3.1
 1012                  * L2 Prefetching Mechanism) for details.
 1013                  */
 1014                 if (cputype == CPU_ID_MV88FR571_VD ||
 1015                     cputype == CPU_ID_MV88FR571_41)
 1016                         sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
 1017 
 1018                 sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
 1019 
 1020                 cpufuncs = sheeva_cpufuncs;
 1021                 get_cachetype_cp15();
 1022                 pmap_pte_init_generic();
 1023                 goto out;
 1024         } else if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS) {
 1025                 cpufuncs = armv5_ec_cpufuncs;
 1026                 get_cachetype_cp15();
 1027                 pmap_pte_init_generic();
 1028                 goto out;
 1029         }
 1030 #endif /* CPU_ARM9E || CPU_ARM10 */
 1031 #ifdef CPU_ARM10
 1032         if (/* cputype == CPU_ID_ARM1020T || */
 1033             cputype == CPU_ID_ARM1020E) {
 1034                 /*
  1035                  * Select write-through caching (this isn't really an
 1036                  * option on ARM1020T).
 1037                  */
 1038                 cpufuncs = arm10_cpufuncs;
 1039                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
 1040                 get_cachetype_cp15();
 1041                 arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
 1042                 arm10_dcache_sets_max =
 1043                     (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
 1044                     arm10_dcache_sets_inc;
 1045                 arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
 1046                 arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
 1047                 pmap_pte_init_generic();
 1048                 goto out;
 1049         }
 1050 #endif /* CPU_ARM10 */
 1051 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
 1052         if (cputype == CPU_ID_ARM1136JS
 1053             || cputype == CPU_ID_ARM1136JSR1
 1054             || cputype == CPU_ID_ARM1176JZS) {
 1055 #ifdef CPU_ARM1136
 1056                 if (cputype == CPU_ID_ARM1136JS
 1057                     || cputype == CPU_ID_ARM1136JSR1)
 1058                         cpufuncs = arm1136_cpufuncs;
 1059 #endif
 1060 #ifdef CPU_ARM1176
 1061                 if (cputype == CPU_ID_ARM1176JZS)
 1062                         cpufuncs = arm1176_cpufuncs;
 1063 #endif
 1064                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
 1065                 get_cachetype_cp15();
 1066 
 1067                 pmap_pte_init_mmu_v6();
 1068 
 1069                 goto out;
 1070         }
 1071 #endif /* CPU_ARM1136 || CPU_ARM1176 */
 1072 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
 1073         if (cputype == CPU_ID_CORTEXA7 ||
 1074             cputype == CPU_ID_CORTEXA8R1 ||
 1075             cputype == CPU_ID_CORTEXA8R2 ||
 1076             cputype == CPU_ID_CORTEXA8R3 ||
 1077             cputype == CPU_ID_CORTEXA9R1 ||
 1078             cputype == CPU_ID_CORTEXA9R2 ||
 1079             cputype == CPU_ID_CORTEXA9R3 ||
 1080             cputype == CPU_ID_CORTEXA15R0 ||
 1081             cputype == CPU_ID_CORTEXA15R1 ||
 1082             cputype == CPU_ID_CORTEXA15R2 ||
 1083             cputype == CPU_ID_CORTEXA15R3 ||
 1084             cputype == CPU_ID_KRAIT ) {
 1085                 cpufuncs = cortexa_cpufuncs;
 1086                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
 1087                 get_cachetype_cp15();
 1088                 
 1089                 pmap_pte_init_mmu_v6();
 1090                 /* Use powersave on this CPU. */
 1091                 cpu_do_powersave = 1;
 1092                 goto out;
 1093         }
  1094 #endif /* CPU_CORTEXA || CPU_KRAIT */
 1095                 
 1096 #if defined(CPU_MV_PJ4B)
 1097         if (cputype == CPU_ID_MV88SV581X_V7 ||
 1098             cputype == CPU_ID_MV88SV584X_V7 ||
 1099             cputype == CPU_ID_ARM_88SV581X_V7) {
 1100                 cpufuncs = pj4bv7_cpufuncs;
 1101                 get_cachetype_cp15();
 1102                 pmap_pte_init_mmu_v6();
 1103                 goto out;
 1104         }
 1105 #endif /* CPU_MV_PJ4B */
 1106 
 1107 #if defined(CPU_FA526) || defined(CPU_FA626TE)
 1108         if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
 1109                 cpufuncs = fa526_cpufuncs;
 1110                 cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it  */
 1111                 get_cachetype_cp15();
 1112                 pmap_pte_init_generic();
 1113 
 1114                 /* Use powersave on this CPU. */
 1115                 cpu_do_powersave = 1;
 1116 
 1117                 goto out;
 1118         }
 1119 #endif  /* CPU_FA526 || CPU_FA626TE */
 1120 
 1121 #ifdef CPU_XSCALE_80200
 1122         if (cputype == CPU_ID_80200) {
 1123                 int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
 1124 
 1125                 i80200_icu_init();
 1126 
 1127 #if defined(XSCALE_CCLKCFG)
 1128                 /*
 1129                  * Crank CCLKCFG to maximum legal value.
 1130                  */
 1131                 __asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
 1132                         :
 1133                         : "r" (XSCALE_CCLKCFG));
 1134 #endif
 1135 
 1136                 /*
 1137                  * XXX Disable ECC in the Bus Controller Unit; we
 1138                  * don't really support it, yet.  Clear any pending
 1139                  * error indications.
 1140                  */
 1141                 __asm __volatile("mcr p13, 0, %0, c0, c1, 0"
 1142                         :
 1143                         : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
 1144 
 1145                 cpufuncs = xscale_cpufuncs;
 1146                 /*
 1147                  * i80200 errata: Step-A0 and A1 have a bug where
 1148                  * D$ dirty bits are not cleared on "invalidate by
 1149                  * address".
 1150                  *
 1151                  * Workaround: Clean cache line before invalidating.
 1152                  */
 1153                 if (rev == 0 || rev == 1)
 1154                         cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
 1155 
 1156                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
 1157                 get_cachetype_cp15();
 1158                 pmap_pte_init_xscale();
 1159                 goto out;
 1160         }
 1161 #endif /* CPU_XSCALE_80200 */
 1162 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
 1163         if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
 1164             cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
 1165             cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
 1166                 cpufuncs = xscale_cpufuncs;
 1167                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
 1168                 get_cachetype_cp15();
 1169                 pmap_pte_init_xscale();
 1170                 goto out;
 1171         }
  1172 #endif /* CPU_XSCALE_80321 || CPU_XSCALE_80219 */
 1173 
 1174 #if defined(CPU_XSCALE_81342)
 1175         if (cputype == CPU_ID_81342) {
 1176                 cpufuncs = xscalec3_cpufuncs;
 1177                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
 1178                 get_cachetype_cp15();
 1179                 pmap_pte_init_xscale();
 1180                 goto out;
 1181         }
 1182 #endif /* CPU_XSCALE_81342 */
 1183 #ifdef CPU_XSCALE_PXA2X0
 1184         /* ignore core revision to test PXA2xx CPUs */
 1185         if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
 1186             (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
 1187             (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
 1188 
 1189                 cpufuncs = xscale_cpufuncs;
 1190                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
 1191                 get_cachetype_cp15();
 1192                 pmap_pte_init_xscale();
 1193 
 1194                 /* Use powersave on this CPU. */
 1195                 cpu_do_powersave = 1;
 1196 
 1197                 goto out;
 1198         }
 1199 #endif /* CPU_XSCALE_PXA2X0 */
 1200 #ifdef CPU_XSCALE_IXP425
 1201         if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
 1202             cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
 1203 
 1204                 cpufuncs = xscale_cpufuncs;
 1205                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
 1206                 get_cachetype_cp15();
 1207                 pmap_pte_init_xscale();
 1208 
 1209                 goto out;
 1210         }
 1211 #endif /* CPU_XSCALE_IXP425 */
 1212         /*
 1213          * Bzzzz. And the answer was ...
 1214          */
 1215         panic("No support for this CPU type (%08x) in kernel", cputype);
 1216         return(ARCHITECTURE_NOT_PRESENT);
 1217 out:
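        /*
         * Align cache-aligned UMA allocations to the data cache line size
         * probed above (arm_dcache_align_mask is the line size minus one).
         */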
 1218         uma_set_align(arm_dcache_align_mask);
 1219         return (0);
 1220 }
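Each branch above follows the same pattern: point the global cpufuncs dispatch table at the variant matching the probed CPU ID, patch individual entries where an erratum requires it (as with the i80200 step-A0/A1 D-cache workaround), and let the rest of the kernel call through the table. A rough stand-alone illustration of that function-pointer-table technique, with invented names rather than the kernel's, is:

#include <stdio.h>

/* A miniature dispatch table in the spirit of struct cpu_functions. */
struct cache_ops {
        void    (*dcache_inv_range)(void *, unsigned int);
        void    (*idcache_wbinv_all)(void);
};

static void
generic_inv_range(void *addr, unsigned int len)
{
        printf("invalidate %u bytes at %p\n", len, addr);
}

static void
purge_inv_range(void *addr, unsigned int len)
{
        printf("clean+invalidate %u bytes at %p (errata path)\n", len, addr);
}

static void
wbinv_all(void)
{
        printf("write back and invalidate all caches\n");
}

static struct cache_ops cache_ops;

static void
select_cache_ops(int buggy_rev)
{
        cache_ops.dcache_inv_range = generic_inv_range;
        cache_ops.idcache_wbinv_all = wbinv_all;
        /* Override a single entry when the silicon revision needs it. */
        if (buggy_rev)
                cache_ops.dcache_inv_range = purge_inv_range;
}

int
main(void)
{
        char buf[64];

        select_cache_ops(1);
        cache_ops.dcache_inv_range(buf, sizeof(buf));
        cache_ops.idcache_wbinv_all();
        return (0);
}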
 1221 
 1222 /*
 1223  * Fixup routines for data and prefetch aborts.
 1224  *
 1225  * Several compile time symbols are used
 1226  *
 1227  * DEBUG_FAULT_CORRECTION - Print debugging information during the
 1228  * correction of registers after a fault.
 1229  */
 1230 
 1231 
 1232 /*
 1233  * Null abort fixup routine.
 1234  * For use when no fixup is required.
 1235  */
 1236 int
 1237 cpufunc_null_fixup(arg)
 1238         void *arg;
 1239 {
 1240         return(ABORT_FIXUP_OK);
 1241 }
 1242 
 1243 /*
 1244  * CPU Setup code
 1245  */
 1246 
 1247 #if defined (CPU_ARM9) || \
 1248   defined(CPU_ARM9E) || \
 1249   defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||             \
 1250   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
 1251   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
 1252   defined(CPU_ARM10) ||  defined(CPU_ARM1136) || defined(CPU_ARM1176) ||\
 1253   defined(CPU_FA526) || defined(CPU_FA626TE)
 1254 
 1255 #define IGN     0
 1256 #define OR      1
 1257 #define BIC     2
 1258 
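/*
 * Boot-option table entry: co_name is the boot argument to look up; when it
 * parses true, co_trueop (IGN/OR/BIC) is applied to the control word with
 * co_value, and co_falseop is applied in the same way when it parses false.
 */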
 1259 struct cpu_option {
 1260         char    *co_name;
 1261         int     co_falseop;
 1262         int     co_trueop;
 1263         int     co_value;
 1264 };
 1265 
 1266 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
 1267 
 1268 static u_int
 1269 parse_cpu_options(args, optlist, cpuctrl)
 1270         char *args;
 1271         struct cpu_option *optlist;
 1272         u_int cpuctrl;
 1273 {
 1274         int integer;
 1275 
 1276         if (args == NULL)
 1277                 return(cpuctrl);
 1278 
 1279         while (optlist->co_name) {
 1280                 if (get_bootconf_option(args, optlist->co_name,
 1281                     BOOTOPT_TYPE_BOOLEAN, &integer)) {
 1282                         if (integer) {
 1283                                 if (optlist->co_trueop == OR)
 1284                                         cpuctrl |= optlist->co_value;
 1285                                 else if (optlist->co_trueop == BIC)
 1286                                         cpuctrl &= ~optlist->co_value;
 1287                         } else {
 1288                                 if (optlist->co_falseop == OR)
 1289                                         cpuctrl |= optlist->co_value;
 1290                                 else if (optlist->co_falseop == BIC)
 1291                                         cpuctrl &= ~optlist->co_value;
 1292                         }
 1293                 }
 1294                 ++optlist;
 1295         }
 1296         return(cpuctrl);
 1297 }
 1298 #endif /* CPU_ARM9 || XSCALE */
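The IGN/OR/BIC encoding used by the option tables below reads as a small truth table: a boolean boot option selects either co_trueop or co_falseop, and that opcode sets (OR) or clears (BIC) co_value in the control word, or leaves it untouched (IGN). A minimal stand-alone sketch of that bit manipulation, using made-up control bits and plain C rather than the kernel's parser:

#include <stdint.h>
#include <stdio.h>

#define OP_IGN  0
#define OP_OR   1
#define OP_BIC  2

/* Illustrative control bits only; the real values live in the cpu headers. */
#define CTRL_IC 0x00001000
#define CTRL_DC 0x00000004

/* Apply one parsed boolean option to a control word. */
static uint32_t
apply_option(uint32_t ctrl, int parsed_true, int trueop, int falseop,
    uint32_t value)
{
        int op = parsed_true ? trueop : falseop;

        if (op == OP_OR)
                ctrl |= value;
        else if (op == OP_BIC)
                ctrl &= ~value;
        return (ctrl);
}

int
main(void)
{
        uint32_t ctrl = CTRL_IC | CTRL_DC;

        /* "cpu.nocache=1": trueop is BIC, so both cache bits are cleared. */
        ctrl = apply_option(ctrl, 1, OP_BIC, OP_OR, CTRL_IC | CTRL_DC);
        printf("after cpu.nocache=1: %#x\n", (unsigned)ctrl);

        /* "cpu.cache=1": trueop is OR, so both cache bits are set again. */
        ctrl = apply_option(ctrl, 1, OP_OR, OP_BIC, CTRL_IC | CTRL_DC);
        printf("after cpu.cache=1:   %#x\n", (unsigned)ctrl);
        return (0);
}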
 1299 
 1300 #ifdef CPU_ARM9
 1301 struct cpu_option arm9_options[] = {
 1302         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1303         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1304         { "arm9.cache", BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1305         { "arm9.icache",        BIC, OR,  CPU_CONTROL_IC_ENABLE },
 1306         { "arm9.dcache",        BIC, OR,  CPU_CONTROL_DC_ENABLE },
 1307         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1308         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 1309         { "arm9.writebuf",      BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1310         { NULL,                 IGN, IGN, 0 }
 1311 };
 1312 
 1313 void
 1314 arm9_setup(args)
 1315         char *args;
 1316 {
 1317         int cpuctrl, cpuctrlmask;
 1318 
 1319         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1320             | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1321             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1322             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
 1323             CPU_CONTROL_ROUNDROBIN;
 1324         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1325                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1326                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1327                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 1328                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 1329                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
 1330                  | CPU_CONTROL_ROUNDROBIN;
 1331 
 1332 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 1333         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 1334 #endif
 1335 
 1336         cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
 1337 
 1338 #ifdef __ARMEB__
 1339         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 1340 #endif
 1341         if (vector_page == ARM_VECTORS_HIGH)
 1342                 cpuctrl |= CPU_CONTROL_VECRELOC;
 1343 
 1344         /* Clear out the cache */
 1345         cpu_idcache_wbinv_all();
 1346 
 1347         /* Set the control register */
 1348         cpu_control(cpuctrlmask, cpuctrl);
 1349         ctrl = cpuctrl;
 1350 
 1351 }
 1352 #endif  /* CPU_ARM9 */
 1353 
 1354 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
 1355 struct cpu_option arm10_options[] = {
 1356         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1357         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1358         { "arm10.cache",        BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1359         { "arm10.icache",       BIC, OR,  CPU_CONTROL_IC_ENABLE },
 1360         { "arm10.dcache",       BIC, OR,  CPU_CONTROL_DC_ENABLE },
 1361         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1362         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 1363         { "arm10.writebuf",     BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1364         { NULL,                 IGN, IGN, 0 }
 1365 };
 1366 
 1367 void
 1368 arm10_setup(args)
 1369         char *args;
 1370 {
 1371         int cpuctrl, cpuctrlmask;
 1372 
 1373         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
 1374             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1375             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
 1376         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
 1377             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1378             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 1379             | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 1380             | CPU_CONTROL_BPRD_ENABLE
 1381             | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
 1382 
 1383 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 1384         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 1385 #endif
 1386 
 1387         cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
 1388 
 1389 #ifdef __ARMEB__
 1390         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 1391 #endif
 1392 
 1393         /* Clear out the cache */
 1394         cpu_idcache_wbinv_all();
 1395 
 1396         /* Now really make sure they are clean.  */
 1397         __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
 1398 
 1399         if (vector_page == ARM_VECTORS_HIGH)
 1400                 cpuctrl |= CPU_CONTROL_VECRELOC;
 1401 
 1402         /* Set the control register */
 1403         ctrl = cpuctrl;
 1404         cpu_control(0xffffffff, cpuctrl);
 1405 
 1406         /* And again. */
 1407         cpu_idcache_wbinv_all();
 1408 }
 1409 #endif  /* CPU_ARM9E || CPU_ARM10 */
 1410 
 1411 #if defined(CPU_ARM1136) || defined(CPU_ARM1176) \
 1412  || defined(CPU_MV_PJ4B) \
 1413  || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
 1414 static __inline void
 1415 cpu_scc_setup_ccnt(void)
 1416 {
 1417 /* This is how you give userland access to the CCNT and PMCn
 1418  * registers.
 1419  * BEWARE! This gives write access also, which may not be what
 1420  * you want!
 1421  */
 1422 #ifdef _PMC_USER_READ_WRITE_
 1423 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
 1424         /* Use the Secure User and Non-secure Access Validation Control Register
 1425          * to allow userland access
 1426          */
 1427         __asm volatile ("mcr    p15, 0, %0, c15, c9, 0\n\t"
 1428                         :
 1429                         : "r"(0x00000001));
 1430 #else
 1431         /* Set PMUSERENR[0] to allow userland access */
 1432         __asm volatile ("mcr    p15, 0, %0, c9, c14, 0\n\t"
 1433                         :
 1434                         : "r"(0x00000001));
 1435 #endif
 1436 #endif
 1437 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
 1438         /* Set PMCR[2,0] to enable counters and reset CCNT */
 1439         __asm volatile ("mcr    p15, 0, %0, c15, c12, 0\n\t"
 1440                         :
 1441                         : "r"(0x00000005));
 1442 #else
 1443         /* Set up the PMCCNTR register as a cyclecounter:
 1444          * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
 1445          * Set PMCR[2,0] to enable counters and reset CCNT
 1446          * Set PMCNTENSET to 0x80000000 to enable CCNT */
 1447         __asm volatile ("mcr    p15, 0, %0, c9, c14, 2\n\t"
 1448                         "mcr    p15, 0, %1, c9, c12, 0\n\t"
 1449                         "mcr    p15, 0, %2, c9, c12, 1\n\t"
 1450                         :
 1451                         : "r"(0xFFFFFFFF),
 1452                           "r"(0x00000005),
 1453                           "r"(0x80000000));
 1454 #endif
 1455 }
 1456 #endif
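When the kernel is built with _PMC_USER_READ_WRITE_, the PMUSERENR write above (or the access-validation write on ARM1136/1176) lets unprivileged code read the cycle counter directly. As an illustration only, assuming an ARMv7 core (the Cortex-A/Krait path; ARM11 uses a different register encoding) and a GCC-style compiler, a userland read could look like:

#include <stdint.h>
#include <stdio.h>

/*
 * Read PMCCNTR, the ARMv7 cycle counter.  This only works from userland
 * after the kernel has set PMUSERENR[0] as done in cpu_scc_setup_ccnt().
 */
static inline uint32_t
read_ccnt(void)
{
        uint32_t ccnt;

        __asm __volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (ccnt));
        return (ccnt);
}

int
main(void)
{
        uint32_t start, end;

        start = read_ccnt();
        /* ... code being measured ... */
        end = read_ccnt();
        printf("elapsed cycles: %u\n", (unsigned)(end - start));
        return (0);
}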
 1457 
 1458 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
 1459 struct cpu_option arm11_options[] = {
 1460         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1461         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1462         { "arm11.cache",        BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1463         { "arm11.icache",       BIC, OR,  CPU_CONTROL_IC_ENABLE },
 1464         { "arm11.dcache",       BIC, OR,  CPU_CONTROL_DC_ENABLE },
 1465         { NULL,                 IGN, IGN, 0 }
 1466 };
 1467 
 1468 void
 1469 arm11x6_setup(char *args)
 1470 {
 1471         int cpuctrl, cpuctrl_wax;
 1472         uint32_t auxctrl, auxctrl_wax;
 1473         uint32_t tmp, tmp2;
 1474         uint32_t sbz=0;
 1475         uint32_t cpuid;
 1476 
 1477         cpuid = cpufunc_id();
 1478 
 1479         cpuctrl =
 1480                 CPU_CONTROL_MMU_ENABLE  |
 1481                 CPU_CONTROL_DC_ENABLE   |
 1482                 CPU_CONTROL_WBUF_ENABLE |
 1483                 CPU_CONTROL_32BP_ENABLE |
 1484                 CPU_CONTROL_32BD_ENABLE |
 1485                 CPU_CONTROL_LABT_ENABLE |
 1486                 CPU_CONTROL_SYST_ENABLE |
 1487                 CPU_CONTROL_IC_ENABLE;
 1488 
 1489         /*
 1490          * "Write as existing" bits: these keep their current values; the
 1491          * inverse of this word is the mask passed to cpu_control() below.
 1492          */
 1493         cpuctrl_wax =
 1494                 (3 << 30) | /* SBZ */
 1495                 (1 << 29) | /* FA */
 1496                 (1 << 28) | /* TR */
 1497                 (3 << 26) | /* SBZ */ 
 1498                 (3 << 19) | /* SBZ */
 1499                 (1 << 17);  /* SBZ */
 1500 
 1501         cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
 1502         cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
 1503 
 1504         cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
 1505 
 1506 #ifdef __ARMEB__
 1507         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 1508 #endif
 1509 
 1510         if (vector_page == ARM_VECTORS_HIGH)
 1511                 cpuctrl |= CPU_CONTROL_VECRELOC;
 1512 
 1513         auxctrl = 0;
 1514         auxctrl_wax = ~0;
 1515         /*
 1516          * This option enables the workaround for the 364296 ARM1136
 1517          * r0pX errata (possible cache data corruption with
 1518          * hit-under-miss enabled). It sets the undocumented bit 31 in
 1519          * the auxiliary control register and the FI bit in the control
 1520          * register, thus disabling hit-under-miss without putting the
 1521          * processor into full low interrupt latency mode. ARM11MPCore
 1522          * is not affected.
 1523          */
 1524         if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
 1525                 cpuctrl |= CPU_CONTROL_FI_ENABLE;
 1526                 auxctrl = ARM1136_AUXCTL_PFI;
 1527                 auxctrl_wax = ~ARM1136_AUXCTL_PFI;
 1528         }
 1529 
 1530         /*
 1531          * Enable an erratum workaround for ARM1176JZ-S r0.
 1532          */
 1533         if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
 1534                 auxctrl = ARM1176_AUXCTL_PHD;
 1535                 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
 1536         }
 1537 
 1538         /* Clear out the cache */
 1539         cpu_idcache_wbinv_all();
 1540 
 1541         /* Now really make sure they are clean.  */
 1542         __asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
 1543 
 1544         /* Allow detection code to find the VFP if it's fitted.  */
 1545         __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
 1546 
 1547         /* Set the control register */
 1548         ctrl = cpuctrl;
 1549         cpu_control(~cpuctrl_wax, cpuctrl);
 1550 
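        /*
         * Read-modify-write the auxiliary control register: keep the bits
         * selected by auxctrl_wax, OR in the new auxctrl bits, and write
         * the register back only if the value actually changed.
         */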
 1551         __asm volatile ("mrc    p15, 0, %0, c1, c0, 1\n\t"
 1552                         "and    %1, %0, %2\n\t"
 1553                         "orr    %1, %1, %3\n\t"
 1554                         "teq    %0, %1\n\t"
 1555                         "mcrne  p15, 0, %1, c1, c0, 1\n\t"
 1556                         : "=r"(tmp), "=r"(tmp2) :
 1557                           "r"(auxctrl_wax), "r"(auxctrl));
 1558 
 1559         /* And again. */
 1560         cpu_idcache_wbinv_all();
 1561 
 1562         cpu_scc_setup_ccnt();
 1563 }
 1564 #endif  /* CPU_ARM1136 || CPU_ARM1176 */
 1565 
 1566 #ifdef CPU_MV_PJ4B
 1567 void
 1568 pj4bv7_setup(args)
 1569         char *args;
 1570 {
 1571         int cpuctrl;
 1572 
 1573         pj4b_config();
 1574 
 1575         cpuctrl = CPU_CONTROL_MMU_ENABLE;
 1576 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 1577         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 1578 #endif
 1579         cpuctrl |= CPU_CONTROL_DC_ENABLE;
 1580         cpuctrl |= (0xf << 3);
 1581         cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
 1582         cpuctrl |= CPU_CONTROL_IC_ENABLE;
 1583         if (vector_page == ARM_VECTORS_HIGH)
 1584                 cpuctrl |= CPU_CONTROL_VECRELOC;
 1585         cpuctrl |= (0x5 << 16) | (1 << 22);
 1586         cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
 1587 
 1588         /* Clear out the cache */
 1589         cpu_idcache_wbinv_all();
 1590 
 1591         /* Set the control register */
 1592         ctrl = cpuctrl;
 1593         cpu_control(0xFFFFFFFF, cpuctrl);
 1594 
 1595         /* And again. */
 1596         cpu_idcache_wbinv_all();
 1597 
 1598         cpu_scc_setup_ccnt();
 1599 }
 1600 #endif /* CPU_MV_PJ4B */
 1601 
 1602 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
 1603 
 1604 void
 1605 cortexa_setup(char *args)
 1606 {
 1607         int cpuctrl, cpuctrlmask;
 1608         
 1609         cpuctrlmask = CPU_CONTROL_MMU_ENABLE |     /* MMU enable         [0] */
 1610             CPU_CONTROL_AFLT_ENABLE |    /* Alignment fault    [1] */
 1611             CPU_CONTROL_DC_ENABLE |      /* DCache enable      [2] */
 1612             CPU_CONTROL_BPRD_ENABLE |    /* Branch prediction [11] */
 1613             CPU_CONTROL_IC_ENABLE |      /* ICache enable     [12] */
 1614             CPU_CONTROL_VECRELOC;        /* Vector relocation [13] */
 1615         
 1616         cpuctrl = CPU_CONTROL_MMU_ENABLE |
 1617             CPU_CONTROL_IC_ENABLE |
 1618             CPU_CONTROL_DC_ENABLE |
 1619             CPU_CONTROL_BPRD_ENABLE;
 1620         
 1621 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 1622         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 1623 #endif
 1624         
 1625         /* Switch to big endian */
 1626 #ifdef __ARMEB__
 1627         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 1628 #endif
 1629         
 1630         /* Check if the vector page is at the high address (0xffff0000) */
 1631         if (vector_page == ARM_VECTORS_HIGH)
 1632                 cpuctrl |= CPU_CONTROL_VECRELOC;
 1633         
 1634         /* Clear out the cache */
 1635         cpu_idcache_wbinv_all();
 1636         
 1637         /* Set the control register */
 1638         ctrl = cpuctrl;
 1639         cpu_control(cpuctrlmask, cpuctrl);
 1640         
 1641         /* And again. */
 1642         cpu_idcache_wbinv_all();
 1643 #ifdef SMP
 1644         armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting  */
 1645 #endif
 1646 
 1647         cpu_scc_setup_ccnt();
 1648 }
 1649 #endif  /* CPU_CORTEXA */
 1650 
 1651 #if defined(CPU_FA526) || defined(CPU_FA626TE)
 1652 struct cpu_option fa526_options[] = {
 1653 #ifdef COMPAT_12
 1654         { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE |
 1655                                            CPU_CONTROL_DC_ENABLE) },
 1656         { "nowritebuf",         IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
 1657 #endif  /* COMPAT_12 */
 1658         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE |
 1659                                            CPU_CONTROL_DC_ENABLE) },
 1660         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE |
 1661                                            CPU_CONTROL_DC_ENABLE) },
 1662         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1663         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 1664         { NULL,                 IGN, IGN, 0 }
 1665 };
 1666 
 1667 void
 1668 fa526_setup(char *args)
 1669 {
 1670         int cpuctrl, cpuctrlmask;
 1671 
 1672         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1673                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1674                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1675                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
 1676                 | CPU_CONTROL_BPRD_ENABLE;
 1677         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1678                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1679                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1680                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 1681                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 1682                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
 1683                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
 1684 
 1685 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 1686         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 1687 #endif
 1688 
 1689         cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
 1690 
 1691 #ifdef __ARMEB__
 1692         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 1693 #endif
 1694 
 1695         if (vector_page == ARM_VECTORS_HIGH)
 1696                 cpuctrl |= CPU_CONTROL_VECRELOC;
 1697 
 1698         /* Clear out the cache */
 1699         cpu_idcache_wbinv_all();
 1700 
 1701         /* Set the control register */
 1702         ctrl = cpuctrl;
 1703         cpu_control(0xffffffff, cpuctrl);
 1704 }
 1705 #endif  /* CPU_FA526 || CPU_FA626TE */
 1706 
 1707 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
 1708   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
 1709   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
 1710 struct cpu_option xscale_options[] = {
 1711 #ifdef COMPAT_12
 1712         { "branchpredict",      BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 1713         { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1714 #endif  /* COMPAT_12 */
 1715         { "cpu.branchpredict",  BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 1716         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1717         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1718         { "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 1719         { "xscale.cache",       BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1720         { "xscale.icache",      BIC, OR,  CPU_CONTROL_IC_ENABLE },
 1721         { "xscale.dcache",      BIC, OR,  CPU_CONTROL_DC_ENABLE },
 1722         { NULL,                 IGN, IGN, 0 }
 1723 };
 1724 
 1725 void
 1726 xscale_setup(args)
 1727         char *args;
 1728 {
 1729         uint32_t auxctl;
 1730         int cpuctrl, cpuctrlmask;
 1731 
 1732         /*
 1733          * The XScale Write Buffer is always enabled.  Our option
 1734          * is to enable/disable coalescing.  Note that bits 6:3
 1735          * must always be enabled.
 1736          */
 1737 
 1738         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1739                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1740                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1741                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
 1742                  | CPU_CONTROL_BPRD_ENABLE;
 1743         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1744                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1745                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1746                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 1747                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 1748                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
 1749                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
 1750                  CPU_CONTROL_L2_ENABLE;
 1751 
 1752 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 1753         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 1754 #endif
 1755 
 1756         cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
 1757 
 1758 #ifdef __ARMEB__
 1759         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 1760 #endif
 1761 
 1762         if (vector_page == ARM_VECTORS_HIGH)
 1763                 cpuctrl |= CPU_CONTROL_VECRELOC;
 1764 #ifdef CPU_XSCALE_CORE3
 1765         cpuctrl |= CPU_CONTROL_L2_ENABLE;
 1766 #endif
 1767 
 1768         /* Clear out the cache */
 1769         cpu_idcache_wbinv_all();
 1770 
 1771         /*
 1772          * Set the control register.  Note that bits 6:3 must always
 1773          * be set to 1.
 1774          */
 1775         ctrl = cpuctrl;
 1776 /*      cpu_control(cpuctrlmask, cpuctrl);*/
 1777         cpu_control(0xffffffff, cpuctrl);
 1778 
 1779         /* Make sure write coalescing is turned on */
 1780         __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
 1781                 : "=r" (auxctl));
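        /* The auxiliary-control K bit disables write-buffer coalescing when set. */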
 1782 #ifdef XSCALE_NO_COALESCE_WRITES
 1783         auxctl |= XSCALE_AUXCTL_K;
 1784 #else
 1785         auxctl &= ~XSCALE_AUXCTL_K;
 1786 #endif
 1787 #ifdef CPU_XSCALE_CORE3
 1788         auxctl |= XSCALE_AUXCTL_LLR;
 1789         auxctl |= XSCALE_AUXCTL_MD_MASK;
 1790 #endif
 1791         __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
 1792                 : : "r" (auxctl));
 1793 }
 1794 #endif  /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 ||
 1795            CPU_XSCALE_80219 || CPU_XSCALE_81342 */
