FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/cpufunc.c


    1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
    2 
    3 /*-
    4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
    5  * arm8 support code Copyright (c) 1997 ARM Limited
    6  * arm8 support code Copyright (c) 1997 Causality Limited
    7  * arm9 support code Copyright (C) 2001 ARM Ltd
    8  * Copyright (c) 1997 Mark Brinicombe.
    9  * Copyright (c) 1997 Causality Limited
   10  * All rights reserved.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  * 3. All advertising materials mentioning features or use of this software
   21  *    must display the following acknowledgement:
   22  *      This product includes software developed by Causality Limited.
   23  * 4. The name of Causality Limited may not be used to endorse or promote
   24  *    products derived from this software without specific prior written
   25  *    permission.
   26  *
   27  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
   28  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   29  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   30  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
   31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   37  * SUCH DAMAGE.
   38  *
   39  * RiscBSD kernel project
   40  *
   41  * cpufuncs.c
   42  *
   43  * C functions for supporting CPU / MMU / TLB specific operations.
   44  *
   45  * Created      : 30/01/97
   46  */
   47 #include <sys/cdefs.h>
   48 __FBSDID("$FreeBSD: releng/10.0/sys/arm/arm/cpufunc.c 253857 2013-08-01 10:06:19Z ganbold $");
   49 
   50 #include <sys/param.h>
   51 #include <sys/systm.h>
   52 #include <sys/lock.h>
   53 #include <sys/mutex.h>
   54 #include <sys/bus.h>
   55 #include <machine/bus.h>
   56 #include <machine/cpu.h>
   57 #include <machine/disassem.h>
   58 
   59 #include <vm/vm.h>
   60 #include <vm/pmap.h>
   61 #include <vm/uma.h>
   62 
   63 #include <machine/cpuconf.h>
   64 #include <machine/cpufunc.h>
   65 #include <machine/bootconfig.h>
   66 
   67 #ifdef CPU_XSCALE_80200
   68 #include <arm/xscale/i80200/i80200reg.h>
   69 #include <arm/xscale/i80200/i80200var.h>
   70 #endif
   71 
   72 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
   73 #include <arm/xscale/i80321/i80321reg.h>
   74 #include <arm/xscale/i80321/i80321var.h>
   75 #endif
   76 
   77 /*
   78  * Some definitions in i81342reg.h clash with i80321reg.h.
   79  * This only happens for the LINT kernel. As it happens,
   80  * we don't need anything from i81342reg.h that we already
   81  * got from somewhere else during a LINT compile.
   82  */
   83 #if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
   84 #include <arm/xscale/i8134x/i81342reg.h>
   85 #endif
   86 
   87 #ifdef CPU_XSCALE_IXP425
   88 #include <arm/xscale/ixp425/ixp425reg.h>
   89 #include <arm/xscale/ixp425/ixp425var.h>
   90 #endif
   91 
   92 /* PRIMARY CACHE VARIABLES */
   93 int     arm_picache_size;
   94 int     arm_picache_line_size;
   95 int     arm_picache_ways;
   96 
   97 int     arm_pdcache_size;       /* and unified */
   98 int     arm_pdcache_line_size;
   99 int     arm_pdcache_ways;
  100 
  101 int     arm_pcache_type;
  102 int     arm_pcache_unified;
  103 
  104 int     arm_dcache_align;
  105 int     arm_dcache_align_mask;
  106 
  107 u_int   arm_cache_level;
  108 u_int   arm_cache_type[14];
  109 u_int   arm_cache_loc;
  110 
  111 /* 1 == use cpu_sleep(), 0 == don't */
  112 int cpu_do_powersave;
  113 int ctrl;
  114 
  115 #ifdef CPU_ARM7TDMI
  116 struct cpu_functions arm7tdmi_cpufuncs = {
  117         /* CPU functions */
  118         
  119         cpufunc_id,                     /* id                   */
  120         cpufunc_nullop,                 /* cpwait               */
  121 
  122         /* MMU functions */
  123 
  124         cpufunc_control,                /* control              */
  125         cpufunc_domains,                /* domain               */
  126         arm7tdmi_setttb,                /* setttb               */
  127         cpufunc_faultstatus,            /* faultstatus          */
  128         cpufunc_faultaddress,           /* faultaddress         */
  129 
  130         /* TLB functions */
  131 
  132         arm7tdmi_tlb_flushID,           /* tlb_flushID          */
  133         arm7tdmi_tlb_flushID_SE,        /* tlb_flushID_SE       */
  134         arm7tdmi_tlb_flushID,           /* tlb_flushI           */
  135         arm7tdmi_tlb_flushID_SE,        /* tlb_flushI_SE        */
  136         arm7tdmi_tlb_flushID,           /* tlb_flushD           */
  137         arm7tdmi_tlb_flushID_SE,        /* tlb_flushD_SE        */
  138 
  139         /* Cache operations */
  140 
  141         cpufunc_nullop,                 /* icache_sync_all      */
  142         (void *)cpufunc_nullop,         /* icache_sync_range    */
  143 
  144         arm7tdmi_cache_flushID,         /* dcache_wbinv_all     */
  145         (void *)arm7tdmi_cache_flushID, /* dcache_wbinv_range   */
  146         (void *)arm7tdmi_cache_flushID, /* dcache_inv_range     */
  147         (void *)cpufunc_nullop,         /* dcache_wb_range      */
  148 
  149         arm7tdmi_cache_flushID,         /* idcache_wbinv_all    */
  150         (void *)arm7tdmi_cache_flushID, /* idcache_wbinv_range  */
  151         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  152         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  153         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  154         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  155 
  156         /* Other functions */
  157 
  158         cpufunc_nullop,                 /* flush_prefetchbuf    */
  159         cpufunc_nullop,                 /* drain_writebuf       */
  160         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  161         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  162 
  163         (void *)cpufunc_nullop,         /* sleep                */
  164 
  165         /* Soft functions */
  166 
  167         late_abort_fixup,               /* dataabt_fixup        */
  168         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  169 
  170         arm7tdmi_context_switch,        /* context_switch       */
  171 
  172         arm7tdmi_setup                  /* cpu setup            */
  173 
  174 };
  175 #endif  /* CPU_ARM7TDMI */
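
/*
 * Editorial note (not part of the original file): each cpu_functions table
 * in this file fills in one slot per operation of the global "cpufuncs"
 * dispatch table declared further down.  Callers normally go through the
 * cpu_*() wrapper macros from <machine/cpufunc.h>.  A minimal usage sketch,
 * assuming the conventional cf_* member names, might look like:
 *
 *      cpu_setttb(new_l1pt);           expands to cpufuncs.cf_setttb(new_l1pt)
 *      cpu_tlb_flushID();              expands to cpufuncs.cf_tlb_flushID()
 *      cpu_idcache_wbinv_all();        expands to cpufuncs.cf_idcache_wbinv_all()
 */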
  176 
  177 #ifdef CPU_ARM8
  178 struct cpu_functions arm8_cpufuncs = {
  179         /* CPU functions */
  180         
  181         cpufunc_id,                     /* id                   */
  182         cpufunc_nullop,                 /* cpwait               */
  183 
  184         /* MMU functions */
  185 
  186         cpufunc_control,                /* control              */
  187         cpufunc_domains,                /* domain               */
  188         arm8_setttb,                    /* setttb               */
  189         cpufunc_faultstatus,            /* faultstatus          */
  190         cpufunc_faultaddress,           /* faultaddress         */
  191 
  192         /* TLB functions */
  193 
  194         arm8_tlb_flushID,               /* tlb_flushID          */
  195         arm8_tlb_flushID_SE,            /* tlb_flushID_SE       */
  196         arm8_tlb_flushID,               /* tlb_flushI           */
  197         arm8_tlb_flushID_SE,            /* tlb_flushI_SE        */
  198         arm8_tlb_flushID,               /* tlb_flushD           */
  199         arm8_tlb_flushID_SE,            /* tlb_flushD_SE        */
  200 
  201         /* Cache operations */
  202 
  203         cpufunc_nullop,                 /* icache_sync_all      */
  204         (void *)cpufunc_nullop,         /* icache_sync_range    */
  205 
  206         arm8_cache_purgeID,             /* dcache_wbinv_all     */
  207         (void *)arm8_cache_purgeID,     /* dcache_wbinv_range   */
  208 /*XXX*/ (void *)arm8_cache_purgeID,     /* dcache_inv_range     */
  209         (void *)arm8_cache_cleanID,     /* dcache_wb_range      */
  210 
  211         arm8_cache_purgeID,             /* idcache_wbinv_all    */
  212         (void *)arm8_cache_purgeID,     /* idcache_wbinv_range  */
  213         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  214         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  215         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  216         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  217 
  218         /* Other functions */
  219 
  220         cpufunc_nullop,                 /* flush_prefetchbuf    */
  221         cpufunc_nullop,                 /* drain_writebuf       */
  222         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  223         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  224 
  225         (void *)cpufunc_nullop,         /* sleep                */
  226 
  227         /* Soft functions */
  228 
  229         cpufunc_null_fixup,             /* dataabt_fixup        */
  230         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  231 
  232         arm8_context_switch,            /* context_switch       */
  233 
  234         arm8_setup                      /* cpu setup            */
  235 };
  236 #endif  /* CPU_ARM8 */
  237 
  238 #ifdef CPU_ARM9
  239 struct cpu_functions arm9_cpufuncs = {
  240         /* CPU functions */
  241 
  242         cpufunc_id,                     /* id                   */
  243         cpufunc_nullop,                 /* cpwait               */
  244 
  245         /* MMU functions */
  246 
  247         cpufunc_control,                /* control              */
  248         cpufunc_domains,                /* Domain               */
  249         arm9_setttb,                    /* Setttb               */
  250         cpufunc_faultstatus,            /* Faultstatus          */
  251         cpufunc_faultaddress,           /* Faultaddress         */
  252 
  253         /* TLB functions */
  254 
  255         armv4_tlb_flushID,              /* tlb_flushID          */
  256         arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
  257         armv4_tlb_flushI,               /* tlb_flushI           */
  258         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  259         armv4_tlb_flushD,               /* tlb_flushD           */
  260         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  261 
  262         /* Cache operations */
  263 
  264         arm9_icache_sync_all,           /* icache_sync_all      */
  265         arm9_icache_sync_range,         /* icache_sync_range    */
  266 
  267         arm9_dcache_wbinv_all,          /* dcache_wbinv_all     */
  268         arm9_dcache_wbinv_range,        /* dcache_wbinv_range   */
  269         arm9_dcache_inv_range,          /* dcache_inv_range     */
  270         arm9_dcache_wb_range,           /* dcache_wb_range      */
  271 
  272         arm9_idcache_wbinv_all,         /* idcache_wbinv_all    */
  273         arm9_idcache_wbinv_range,       /* idcache_wbinv_range  */
  274         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  275         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  276         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  277         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  278 
  279         /* Other functions */
  280 
  281         cpufunc_nullop,                 /* flush_prefetchbuf    */
  282         armv4_drain_writebuf,           /* drain_writebuf       */
  283         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  284         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  285 
  286         (void *)cpufunc_nullop,         /* sleep                */
  287 
  288         /* Soft functions */
  289 
  290         cpufunc_null_fixup,             /* dataabt_fixup        */
  291         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  292 
  293         arm9_context_switch,            /* context_switch       */
  294 
  295         arm9_setup                      /* cpu setup            */
  296 
  297 };
  298 #endif /* CPU_ARM9 */
  299 
  300 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
  301 struct cpu_functions armv5_ec_cpufuncs = {
  302         /* CPU functions */
  303 
  304         cpufunc_id,                     /* id                   */
  305         cpufunc_nullop,                 /* cpwait               */
  306 
  307         /* MMU functions */
  308 
  309         cpufunc_control,                /* control              */
  310         cpufunc_domains,                /* Domain               */
  311         armv5_ec_setttb,                /* Setttb               */
  312         cpufunc_faultstatus,            /* Faultstatus          */
  313         cpufunc_faultaddress,           /* Faultaddress         */
  314 
  315         /* TLB functions */
  316 
  317         armv4_tlb_flushID,              /* tlb_flushID          */
  318         arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
  319         armv4_tlb_flushI,               /* tlb_flushI           */
  320         arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
  321         armv4_tlb_flushD,               /* tlb_flushD           */
  322         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  323 
  324         /* Cache operations */
  325 
  326         armv5_ec_icache_sync_all,       /* icache_sync_all      */
  327         armv5_ec_icache_sync_range,     /* icache_sync_range    */
  328 
  329         armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
  330         armv5_ec_dcache_wbinv_range,    /* dcache_wbinv_range   */
  331         armv5_ec_dcache_inv_range,      /* dcache_inv_range     */
  332         armv5_ec_dcache_wb_range,       /* dcache_wb_range      */
  333 
  334         armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
  335         armv5_ec_idcache_wbinv_range,   /* idcache_wbinv_range  */
  336 
  337         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  338         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  339         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  340         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  341 
  342         /* Other functions */
  343 
  344         cpufunc_nullop,                 /* flush_prefetchbuf    */
  345         armv4_drain_writebuf,           /* drain_writebuf       */
  346         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  347         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  348 
  349         (void *)cpufunc_nullop,         /* sleep                */
  350 
  351         /* Soft functions */
  352 
  353         cpufunc_null_fixup,             /* dataabt_fixup        */
  354         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  355 
  356         arm10_context_switch,           /* context_switch       */
  357 
  358         arm10_setup                     /* cpu setup            */
  359 
  360 };
  361 
  362 struct cpu_functions sheeva_cpufuncs = {
  363         /* CPU functions */
  364 
  365         cpufunc_id,                     /* id                   */
  366         cpufunc_nullop,                 /* cpwait               */
  367 
  368         /* MMU functions */
  369 
  370         cpufunc_control,                /* control              */
  371         cpufunc_domains,                /* Domain               */
  372         sheeva_setttb,                  /* Setttb               */
  373         cpufunc_faultstatus,            /* Faultstatus          */
  374         cpufunc_faultaddress,           /* Faultaddress         */
  375 
  376         /* TLB functions */
  377 
  378         armv4_tlb_flushID,              /* tlb_flushID          */
  379         arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
  380         armv4_tlb_flushI,               /* tlb_flushI           */
  381         arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
  382         armv4_tlb_flushD,               /* tlb_flushD           */
  383         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  384 
  385         /* Cache operations */
  386 
  387         armv5_ec_icache_sync_all,       /* icache_sync_all      */
  388         armv5_ec_icache_sync_range,     /* icache_sync_range    */
  389 
  390         armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
  391         sheeva_dcache_wbinv_range,      /* dcache_wbinv_range   */
  392         sheeva_dcache_inv_range,        /* dcache_inv_range     */
  393         sheeva_dcache_wb_range,         /* dcache_wb_range      */
  394 
  395         armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
   396         sheeva_idcache_wbinv_range,     /* idcache_wbinv_range  */
  397 
  398         sheeva_l2cache_wbinv_all,       /* l2cache_wbinv_all    */
  399         sheeva_l2cache_wbinv_range,     /* l2cache_wbinv_range  */
  400         sheeva_l2cache_inv_range,       /* l2cache_inv_range    */
  401         sheeva_l2cache_wb_range,        /* l2cache_wb_range     */
  402 
  403         /* Other functions */
  404 
  405         cpufunc_nullop,                 /* flush_prefetchbuf    */
  406         armv4_drain_writebuf,           /* drain_writebuf       */
  407         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  408         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  409 
  410         sheeva_cpu_sleep,               /* sleep                */
  411 
  412         /* Soft functions */
  413 
  414         cpufunc_null_fixup,             /* dataabt_fixup        */
  415         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  416 
  417         arm10_context_switch,           /* context_switch       */
  418 
  419         arm10_setup                     /* cpu setup            */
  420 };
  421 #endif /* CPU_ARM9E || CPU_ARM10 */
  422 
  423 #ifdef CPU_ARM10
  424 struct cpu_functions arm10_cpufuncs = {
  425         /* CPU functions */
  426 
  427         cpufunc_id,                     /* id                   */
  428         cpufunc_nullop,                 /* cpwait               */
  429 
  430         /* MMU functions */
  431 
  432         cpufunc_control,                /* control              */
  433         cpufunc_domains,                /* Domain               */
  434         arm10_setttb,                   /* Setttb               */
  435         cpufunc_faultstatus,            /* Faultstatus          */
  436         cpufunc_faultaddress,           /* Faultaddress         */
  437 
  438         /* TLB functions */
  439 
  440         armv4_tlb_flushID,              /* tlb_flushID          */
  441         arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
  442         armv4_tlb_flushI,               /* tlb_flushI           */
  443         arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
  444         armv4_tlb_flushD,               /* tlb_flushD           */
  445         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  446 
  447         /* Cache operations */
  448 
  449         arm10_icache_sync_all,          /* icache_sync_all      */
  450         arm10_icache_sync_range,        /* icache_sync_range    */
  451 
  452         arm10_dcache_wbinv_all,         /* dcache_wbinv_all     */
  453         arm10_dcache_wbinv_range,       /* dcache_wbinv_range   */
  454         arm10_dcache_inv_range,         /* dcache_inv_range     */
  455         arm10_dcache_wb_range,          /* dcache_wb_range      */
  456 
  457         arm10_idcache_wbinv_all,        /* idcache_wbinv_all    */
  458         arm10_idcache_wbinv_range,      /* idcache_wbinv_range  */
  459         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  460         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  461         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  462         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  463 
  464         /* Other functions */
  465 
  466         cpufunc_nullop,                 /* flush_prefetchbuf    */
  467         armv4_drain_writebuf,           /* drain_writebuf       */
  468         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  469         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  470 
  471         (void *)cpufunc_nullop,         /* sleep                */
  472 
  473         /* Soft functions */
  474 
  475         cpufunc_null_fixup,             /* dataabt_fixup        */
  476         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  477 
  478         arm10_context_switch,           /* context_switch       */
  479 
  480         arm10_setup                     /* cpu setup            */
  481 
  482 };
  483 #endif /* CPU_ARM10 */
  484 
  485 #ifdef CPU_MV_PJ4B
  486 struct cpu_functions pj4bv7_cpufuncs = {
  487         /* CPU functions */
  488 
  489         cpufunc_id,                     /* id                   */
  490         arm11_drain_writebuf,           /* cpwait               */
  491 
  492         /* MMU functions */
  493 
  494         cpufunc_control,                /* control              */
  495         cpufunc_domains,                /* Domain               */
  496         pj4b_setttb,                    /* Setttb               */
  497         cpufunc_faultstatus,            /* Faultstatus          */
  498         cpufunc_faultaddress,           /* Faultaddress         */
  499 
  500         /* TLB functions */
  501 
  502         armv7_tlb_flushID,              /* tlb_flushID          */
  503         armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
  504         armv7_tlb_flushID,              /* tlb_flushI           */
  505         armv7_tlb_flushID_SE,           /* tlb_flushI_SE        */
  506         armv7_tlb_flushID,              /* tlb_flushD           */
  507         armv7_tlb_flushID_SE,           /* tlb_flushD_SE        */
  508 
  509         /* Cache operations */
  510         armv7_idcache_wbinv_all,        /* icache_sync_all      */
  511         armv7_icache_sync_range,        /* icache_sync_range    */
  512 
  513         armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
  514         armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
  515         armv7_dcache_inv_range,         /* dcache_inv_range     */
  516         armv7_dcache_wb_range,          /* dcache_wb_range      */
  517 
  518         armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
   519         armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */
  520 
  521         (void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
  522         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  523         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  524         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  525 
  526         /* Other functions */
  527 
  528         pj4b_drain_readbuf,             /* flush_prefetchbuf    */
  529         arm11_drain_writebuf,           /* drain_writebuf       */
  530         pj4b_flush_brnchtgt_all,        /* flush_brnchtgt_C     */
  531         pj4b_flush_brnchtgt_va,         /* flush_brnchtgt_E     */
  532 
  533         (void *)cpufunc_nullop,         /* sleep                */
  534 
  535         /* Soft functions */
  536 
  537         cpufunc_null_fixup,             /* dataabt_fixup        */
  538         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  539 
  540         arm11_context_switch,           /* context_switch       */
  541 
  542         pj4bv7_setup                    /* cpu setup            */
  543 };
  544 
  545 struct cpu_functions pj4bv6_cpufuncs = {
  546         /* CPU functions */
  547 
  548         cpufunc_id,                     /* id                   */
  549         arm11_drain_writebuf,           /* cpwait               */
  550 
  551         /* MMU functions */
  552 
  553         cpufunc_control,                /* control              */
  554         cpufunc_domains,                /* Domain               */
  555         pj4b_setttb,                    /* Setttb               */
  556         cpufunc_faultstatus,            /* Faultstatus          */
  557         cpufunc_faultaddress,           /* Faultaddress         */
  558 
  559         /* TLB functions */
  560 
  561         arm11_tlb_flushID,              /* tlb_flushID          */
  562         arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
  563         arm11_tlb_flushI,               /* tlb_flushI           */
  564         arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
  565         arm11_tlb_flushD,               /* tlb_flushD           */
  566         arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */
  567 
  568         /* Cache operations */
  569         armv6_icache_sync_all,          /* icache_sync_all      */
  570         pj4b_icache_sync_range,         /* icache_sync_range    */
  571 
  572         armv6_dcache_wbinv_all,         /* dcache_wbinv_all     */
  573         pj4b_dcache_wbinv_range,        /* dcache_wbinv_range   */
  574         pj4b_dcache_inv_range,          /* dcache_inv_range     */
  575         pj4b_dcache_wb_range,           /* dcache_wb_range      */
  576 
  577         armv6_idcache_wbinv_all,        /* idcache_wbinv_all    */
   578         pj4b_idcache_wbinv_range,       /* idcache_wbinv_range  */
  579 
  580         (void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
  581         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  582         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  583         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  584 
  585         /* Other functions */
  586 
  587         pj4b_drain_readbuf,             /* flush_prefetchbuf    */
  588         arm11_drain_writebuf,           /* drain_writebuf       */
  589         pj4b_flush_brnchtgt_all,        /* flush_brnchtgt_C     */
  590         pj4b_flush_brnchtgt_va,         /* flush_brnchtgt_E     */
  591 
  592         (void *)cpufunc_nullop,         /* sleep                */
  593 
  594         /* Soft functions */
  595 
  596         cpufunc_null_fixup,             /* dataabt_fixup        */
  597         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  598 
  599         arm11_context_switch,           /* context_switch       */
  600 
  601         pj4bv6_setup                    /* cpu setup            */
  602 };
  603 #endif /* CPU_MV_PJ4B */
  604 
  605 #ifdef CPU_SA110
  606 struct cpu_functions sa110_cpufuncs = {
  607         /* CPU functions */
  608         
  609         cpufunc_id,                     /* id                   */
  610         cpufunc_nullop,                 /* cpwait               */
  611 
  612         /* MMU functions */
  613 
  614         cpufunc_control,                /* control              */
  615         cpufunc_domains,                /* domain               */
  616         sa1_setttb,                     /* setttb               */
  617         cpufunc_faultstatus,            /* faultstatus          */
  618         cpufunc_faultaddress,           /* faultaddress         */
  619 
  620         /* TLB functions */
  621 
  622         armv4_tlb_flushID,              /* tlb_flushID          */
  623         sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
  624         armv4_tlb_flushI,               /* tlb_flushI           */
  625         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  626         armv4_tlb_flushD,               /* tlb_flushD           */
  627         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  628 
  629         /* Cache operations */
  630 
  631         sa1_cache_syncI,                /* icache_sync_all      */
  632         sa1_cache_syncI_rng,            /* icache_sync_range    */
  633 
  634         sa1_cache_purgeD,               /* dcache_wbinv_all     */
  635         sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
  636 /*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
  637         sa1_cache_cleanD_rng,           /* dcache_wb_range      */
  638 
  639         sa1_cache_purgeID,              /* idcache_wbinv_all    */
  640         sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */
  641         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  642         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  643         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  644         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  645 
  646         /* Other functions */
  647 
  648         cpufunc_nullop,                 /* flush_prefetchbuf    */
  649         armv4_drain_writebuf,           /* drain_writebuf       */
  650         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  651         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  652 
  653         (void *)cpufunc_nullop,         /* sleep                */
  654 
  655         /* Soft functions */
  656 
  657         cpufunc_null_fixup,             /* dataabt_fixup        */
  658         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  659 
  660         sa110_context_switch,           /* context_switch       */
  661 
  662         sa110_setup                     /* cpu setup            */
  663 };
  664 #endif  /* CPU_SA110 */
  665 
  666 #if defined(CPU_SA1100) || defined(CPU_SA1110)
  667 struct cpu_functions sa11x0_cpufuncs = {
  668         /* CPU functions */
  669         
  670         cpufunc_id,                     /* id                   */
  671         cpufunc_nullop,                 /* cpwait               */
  672 
  673         /* MMU functions */
  674 
  675         cpufunc_control,                /* control              */
  676         cpufunc_domains,                /* domain               */
  677         sa1_setttb,                     /* setttb               */
  678         cpufunc_faultstatus,            /* faultstatus          */
  679         cpufunc_faultaddress,           /* faultaddress         */
  680 
  681         /* TLB functions */
  682 
  683         armv4_tlb_flushID,              /* tlb_flushID          */
  684         sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
  685         armv4_tlb_flushI,               /* tlb_flushI           */
  686         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  687         armv4_tlb_flushD,               /* tlb_flushD           */
  688         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  689 
  690         /* Cache operations */
  691 
  692         sa1_cache_syncI,                /* icache_sync_all      */
  693         sa1_cache_syncI_rng,            /* icache_sync_range    */
  694 
  695         sa1_cache_purgeD,               /* dcache_wbinv_all     */
  696         sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
  697 /*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
  698         sa1_cache_cleanD_rng,           /* dcache_wb_range      */
  699 
  700         sa1_cache_purgeID,              /* idcache_wbinv_all    */
  701         sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */
  702         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  703         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  704         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  705         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  706 
  707         /* Other functions */
  708 
  709         sa11x0_drain_readbuf,           /* flush_prefetchbuf    */
  710         armv4_drain_writebuf,           /* drain_writebuf       */
  711         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  712         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  713 
  714         sa11x0_cpu_sleep,               /* sleep                */
  715 
  716         /* Soft functions */
  717 
  718         cpufunc_null_fixup,             /* dataabt_fixup        */
  719         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  720 
  721         sa11x0_context_switch,          /* context_switch       */
  722 
  723         sa11x0_setup                    /* cpu setup            */
  724 };
  725 #endif  /* CPU_SA1100 || CPU_SA1110 */
  726 
  727 #ifdef CPU_IXP12X0
  728 struct cpu_functions ixp12x0_cpufuncs = {
  729         /* CPU functions */
  730         
  731         cpufunc_id,                     /* id                   */
  732         cpufunc_nullop,                 /* cpwait               */
  733 
  734         /* MMU functions */
  735 
  736         cpufunc_control,                /* control              */
  737         cpufunc_domains,                /* domain               */
  738         sa1_setttb,                     /* setttb               */
  739         cpufunc_faultstatus,            /* faultstatus          */
  740         cpufunc_faultaddress,           /* faultaddress         */
  741 
  742         /* TLB functions */
  743 
  744         armv4_tlb_flushID,              /* tlb_flushID          */
  745         sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
  746         armv4_tlb_flushI,               /* tlb_flushI           */
  747         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  748         armv4_tlb_flushD,               /* tlb_flushD           */
  749         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  750 
  751         /* Cache operations */
  752 
  753         sa1_cache_syncI,                /* icache_sync_all      */
  754         sa1_cache_syncI_rng,            /* icache_sync_range    */
  755 
  756         sa1_cache_purgeD,               /* dcache_wbinv_all     */
  757         sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
  758 /*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
  759         sa1_cache_cleanD_rng,           /* dcache_wb_range      */
  760 
  761         sa1_cache_purgeID,              /* idcache_wbinv_all    */
  762         sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */
  763         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  764         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  765         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  766         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  767 
  768         /* Other functions */
  769 
   770         ixp12x0_drain_readbuf,          /* flush_prefetchbuf    */
  771         armv4_drain_writebuf,           /* drain_writebuf       */
  772         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  773         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  774 
  775         (void *)cpufunc_nullop,         /* sleep                */
  776 
  777         /* Soft functions */
  778 
  779         cpufunc_null_fixup,             /* dataabt_fixup        */
  780         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  781 
  782         ixp12x0_context_switch,         /* context_switch       */
  783 
  784         ixp12x0_setup                   /* cpu setup            */
  785 };
  786 #endif  /* CPU_IXP12X0 */
  787 
  788 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
  789   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  790   defined(CPU_XSCALE_80219)
  791 
  792 struct cpu_functions xscale_cpufuncs = {
  793         /* CPU functions */
  794         
  795         cpufunc_id,                     /* id                   */
  796         xscale_cpwait,                  /* cpwait               */
  797 
  798         /* MMU functions */
  799 
  800         xscale_control,                 /* control              */
  801         cpufunc_domains,                /* domain               */
  802         xscale_setttb,                  /* setttb               */
  803         cpufunc_faultstatus,            /* faultstatus          */
  804         cpufunc_faultaddress,           /* faultaddress         */
  805 
  806         /* TLB functions */
  807 
  808         armv4_tlb_flushID,              /* tlb_flushID          */
  809         xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
  810         armv4_tlb_flushI,               /* tlb_flushI           */
  811         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  812         armv4_tlb_flushD,               /* tlb_flushD           */
  813         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  814 
  815         /* Cache operations */
  816 
  817         xscale_cache_syncI,             /* icache_sync_all      */
  818         xscale_cache_syncI_rng,         /* icache_sync_range    */
  819 
  820         xscale_cache_purgeD,            /* dcache_wbinv_all     */
  821         xscale_cache_purgeD_rng,        /* dcache_wbinv_range   */
  822         xscale_cache_flushD_rng,        /* dcache_inv_range     */
  823         xscale_cache_cleanD_rng,        /* dcache_wb_range      */
  824 
  825         xscale_cache_purgeID,           /* idcache_wbinv_all    */
  826         xscale_cache_purgeID_rng,       /* idcache_wbinv_range  */
  827         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  828         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  829         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  830         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  831 
  832         /* Other functions */
  833 
  834         cpufunc_nullop,                 /* flush_prefetchbuf    */
  835         armv4_drain_writebuf,           /* drain_writebuf       */
  836         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  837         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  838 
  839         xscale_cpu_sleep,               /* sleep                */
  840 
  841         /* Soft functions */
  842 
  843         cpufunc_null_fixup,             /* dataabt_fixup        */
  844         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  845 
  846         xscale_context_switch,          /* context_switch       */
  847 
  848         xscale_setup                    /* cpu setup            */
  849 };
  850 #endif
   851 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 ||
  852    CPU_XSCALE_80219 */
  853 
  854 #ifdef CPU_XSCALE_81342
  855 struct cpu_functions xscalec3_cpufuncs = {
  856         /* CPU functions */
  857         
  858         cpufunc_id,                     /* id                   */
  859         xscale_cpwait,                  /* cpwait               */
  860 
  861         /* MMU functions */
  862 
  863         xscale_control,                 /* control              */
  864         cpufunc_domains,                /* domain               */
  865         xscalec3_setttb,                /* setttb               */
  866         cpufunc_faultstatus,            /* faultstatus          */
  867         cpufunc_faultaddress,           /* faultaddress         */
  868 
  869         /* TLB functions */
  870 
  871         armv4_tlb_flushID,              /* tlb_flushID          */
  872         xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
  873         armv4_tlb_flushI,               /* tlb_flushI           */
  874         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  875         armv4_tlb_flushD,               /* tlb_flushD           */
  876         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  877 
  878         /* Cache operations */
  879 
  880         xscalec3_cache_syncI,           /* icache_sync_all      */
  881         xscalec3_cache_syncI_rng,       /* icache_sync_range    */
  882 
  883         xscalec3_cache_purgeD,          /* dcache_wbinv_all     */
  884         xscalec3_cache_purgeD_rng,      /* dcache_wbinv_range   */
  885         xscale_cache_flushD_rng,        /* dcache_inv_range     */
  886         xscalec3_cache_cleanD_rng,      /* dcache_wb_range      */
  887 
  888         xscalec3_cache_purgeID,         /* idcache_wbinv_all    */
  889         xscalec3_cache_purgeID_rng,     /* idcache_wbinv_range  */
  890         xscalec3_l2cache_purge,         /* l2cache_wbinv_all    */
  891         xscalec3_l2cache_purge_rng,     /* l2cache_wbinv_range  */
  892         xscalec3_l2cache_flush_rng,     /* l2cache_inv_range    */
  893         xscalec3_l2cache_clean_rng,     /* l2cache_wb_range     */
  894 
  895         /* Other functions */
  896 
  897         cpufunc_nullop,                 /* flush_prefetchbuf    */
  898         armv4_drain_writebuf,           /* drain_writebuf       */
  899         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  900         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  901 
  902         xscale_cpu_sleep,               /* sleep                */
  903 
  904         /* Soft functions */
  905 
  906         cpufunc_null_fixup,             /* dataabt_fixup        */
  907         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  908 
  909         xscalec3_context_switch,        /* context_switch       */
  910 
  911         xscale_setup                    /* cpu setup            */
  912 };
  913 #endif /* CPU_XSCALE_81342 */
  914 
  915 
  916 #if defined(CPU_FA526) || defined(CPU_FA626TE)
  917 struct cpu_functions fa526_cpufuncs = {
  918         /* CPU functions */
  919 
  920         cpufunc_id,                     /* id                   */
  921         cpufunc_nullop,                 /* cpwait               */
  922 
  923         /* MMU functions */
  924 
  925         cpufunc_control,                /* control              */
  926         cpufunc_domains,                /* domain               */
  927         fa526_setttb,                   /* setttb               */
  928         cpufunc_faultstatus,            /* faultstatus          */
  929         cpufunc_faultaddress,           /* faultaddress         */
  930 
  931         /* TLB functions */
  932 
  933         armv4_tlb_flushID,              /* tlb_flushID          */
  934         fa526_tlb_flushID_SE,           /* tlb_flushID_SE       */
  935         armv4_tlb_flushI,               /* tlb_flushI           */
  936         fa526_tlb_flushI_SE,            /* tlb_flushI_SE        */
  937         armv4_tlb_flushD,               /* tlb_flushD           */
  938         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  939 
  940         /* Cache operations */
  941 
  942         fa526_icache_sync_all,          /* icache_sync_all      */
  943         fa526_icache_sync_range,        /* icache_sync_range    */
  944 
  945         fa526_dcache_wbinv_all,         /* dcache_wbinv_all     */
  946         fa526_dcache_wbinv_range,       /* dcache_wbinv_range   */
  947         fa526_dcache_inv_range,         /* dcache_inv_range     */
  948         fa526_dcache_wb_range,          /* dcache_wb_range      */
  949 
  950         fa526_idcache_wbinv_all,        /* idcache_wbinv_all    */
  951         fa526_idcache_wbinv_range,      /* idcache_wbinv_range  */
  952         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  953         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  954         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  955         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  956 
  957         /* Other functions */
  958 
  959         fa526_flush_prefetchbuf,        /* flush_prefetchbuf    */
  960         armv4_drain_writebuf,           /* drain_writebuf       */
  961         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  962         fa526_flush_brnchtgt_E,         /* flush_brnchtgt_E     */
  963 
  964         fa526_cpu_sleep,                /* sleep                */
  965 
  966         /* Soft functions */
  967 
  968         cpufunc_null_fixup,             /* dataabt_fixup        */
  969         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  970 
  971         fa526_context_switch,           /* context_switch       */
  972 
  973         fa526_setup                     /* cpu setup            */
  974 };
  975 #endif  /* CPU_FA526 || CPU_FA626TE */
  976 
  977 #if defined(CPU_ARM1136)
  978 struct cpu_functions arm1136_cpufuncs = {
  979         /* CPU functions */
  980         
  981         cpufunc_id,                     /* id                   */
  982         cpufunc_nullop,                 /* cpwait               */
  983         
  984         /* MMU functions */
  985         
  986         cpufunc_control,                /* control              */
  987         cpufunc_domains,                /* Domain               */
  988         arm11x6_setttb,                 /* Setttb               */
  989         cpufunc_faultstatus,            /* Faultstatus          */
  990         cpufunc_faultaddress,           /* Faultaddress         */
  991         
  992         /* TLB functions */
  993         
  994         arm11_tlb_flushID,              /* tlb_flushID          */
  995         arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
  996         arm11_tlb_flushI,               /* tlb_flushI           */
  997         arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
  998         arm11_tlb_flushD,               /* tlb_flushD           */
  999         arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */
 1000         
 1001         /* Cache operations */
 1002         
 1003         arm11x6_icache_sync_all,        /* icache_sync_all      */
 1004         arm11x6_icache_sync_range,      /* icache_sync_range    */
 1005         
 1006         arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
 1007         armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
 1008         armv6_dcache_inv_range,         /* dcache_inv_range     */
 1009         armv6_dcache_wb_range,          /* dcache_wb_range      */
 1010         
 1011         arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
 1012         arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */
 1013         
 1014         (void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
 1015         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
 1016         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
 1017         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
 1018         
 1019         /* Other functions */
 1020         
 1021         arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
 1022         arm11_drain_writebuf,           /* drain_writebuf       */
 1023         cpufunc_nullop,                 /* flush_brnchtgt_C     */
 1024         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
 1025         
 1026         arm11_sleep,                    /* sleep                */
 1027         
 1028         /* Soft functions */
 1029         
 1030         cpufunc_null_fixup,             /* dataabt_fixup        */
 1031         cpufunc_null_fixup,             /* prefetchabt_fixup    */
 1032         
 1033         arm11_context_switch,           /* context_switch       */
 1034         
 1035         arm11x6_setup                   /* cpu setup            */
 1036 };
 1037 #endif /* CPU_ARM1136 */
 1038 #if defined(CPU_ARM1176)
 1039 struct cpu_functions arm1176_cpufuncs = {
 1040         /* CPU functions */
 1041         
 1042         cpufunc_id,                     /* id                   */
 1043         cpufunc_nullop,                 /* cpwait               */
 1044         
 1045         /* MMU functions */
 1046         
 1047         cpufunc_control,                /* control              */
 1048         cpufunc_domains,                /* Domain               */
 1049         arm11x6_setttb,                 /* Setttb               */
 1050         cpufunc_faultstatus,            /* Faultstatus          */
 1051         cpufunc_faultaddress,           /* Faultaddress         */
 1052         
 1053         /* TLB functions */
 1054         
 1055         arm11_tlb_flushID,              /* tlb_flushID          */
 1056         arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
 1057         arm11_tlb_flushI,               /* tlb_flushI           */
 1058         arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
 1059         arm11_tlb_flushD,               /* tlb_flushD           */
 1060         arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */
 1061         
 1062         /* Cache operations */
 1063         
 1064         arm11x6_icache_sync_all,        /* icache_sync_all      */
 1065         arm11x6_icache_sync_range,      /* icache_sync_range    */
 1066         
 1067         arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
 1068         armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
 1069         armv6_dcache_inv_range,         /* dcache_inv_range     */
 1070         armv6_dcache_wb_range,          /* dcache_wb_range      */
 1071         
 1072         arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
 1073         arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */
 1074         
 1075         (void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
 1076         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
 1077         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
 1078         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
 1079         
 1080         /* Other functions */
 1081         
 1082         arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
 1083         arm11_drain_writebuf,           /* drain_writebuf       */
 1084         cpufunc_nullop,                 /* flush_brnchtgt_C     */
 1085         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
 1086         
 1087         arm11x6_sleep,                  /* sleep                */
 1088         
 1089         /* Soft functions */
 1090         
 1091         cpufunc_null_fixup,             /* dataabt_fixup        */
 1092         cpufunc_null_fixup,             /* prefetchabt_fixup    */
 1093         
 1094         arm11_context_switch,           /* context_switch       */
 1095         
 1096         arm11x6_setup                   /* cpu setup            */
 1097 };
 1098 #endif /*CPU_ARM1176 */
 1099 
 1100 #if defined(CPU_CORTEXA)
 1101 struct cpu_functions cortexa_cpufuncs = {
 1102         /* CPU functions */
 1103         
 1104         cpufunc_id,                     /* id                   */
 1105         cpufunc_nullop,                 /* cpwait               */
 1106         
 1107         /* MMU functions */
 1108         
 1109         cpufunc_control,                /* control              */
 1110         cpufunc_domains,                /* Domain               */
 1111         armv7_setttb,                   /* Setttb               */
 1112         cpufunc_faultstatus,            /* Faultstatus          */
 1113         cpufunc_faultaddress,           /* Faultaddress         */
 1114         
 1115         /* TLB functions */
 1116         
 1117         armv7_tlb_flushID,              /* tlb_flushID          */
 1118         armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
 1119         arm11_tlb_flushI,               /* tlb_flushI           */
 1120         arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
 1121         arm11_tlb_flushD,               /* tlb_flushD           */
 1122         arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */
 1123         
 1124         /* Cache operations */
 1125         
 1126         armv7_idcache_wbinv_all,         /* icache_sync_all      */
 1127         armv7_icache_sync_range,        /* icache_sync_range    */
 1128         
 1129         armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
 1130         armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
 1131         armv7_dcache_inv_range,         /* dcache_inv_range     */
 1132         armv7_dcache_wb_range,          /* dcache_wb_range      */
 1133         
 1134         armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
 1135         armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */
 1136         
 1137         /* 
 1138          * Note: For CPUs using the PL310 the L2 ops are filled in when the
  1139          * L2 cache controller is actually enabled; a sketch follows this table.
 1140          */
 1141         cpufunc_nullop,                 /* l2cache_wbinv_all    */
 1142         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
 1143         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
 1144         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
 1145         
 1146         /* Other functions */
 1147         
 1148         cpufunc_nullop,                 /* flush_prefetchbuf    */
 1149         armv7_drain_writebuf,           /* drain_writebuf       */
 1150         cpufunc_nullop,                 /* flush_brnchtgt_C     */
 1151         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
 1152         
 1153         arm11_sleep,                    /* sleep                */
 1154         
 1155         /* Soft functions */
 1156         
 1157         cpufunc_null_fixup,             /* dataabt_fixup        */
 1158         cpufunc_null_fixup,             /* prefetchabt_fixup    */
 1159         
 1160         armv7_context_switch,           /* context_switch       */
 1161         
 1162         cortexa_setup                     /* cpu setup            */
 1163 };
 1164 #endif /* CPU_CORTEXA */
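
/*
 * Editorial sketch (not part of the original file): as the comment in the
 * Cortex-A table notes, the L2 entries above start out as no-ops and are
 * expected to be replaced once an outer cache controller (e.g. a PL310)
 * attaches.  The pl310_* handler names below are placeholders used for
 * illustration only; a driver could hypothetically install its handlers
 * like this:
 */
#if 0
static void
example_l2_install(void)
{
        cpufuncs.cf_l2cache_wbinv_all = pl310_wbinv_all;
        cpufuncs.cf_l2cache_wbinv_range = pl310_wbinv_range;
        cpufuncs.cf_l2cache_inv_range = pl310_inv_range;
        cpufuncs.cf_l2cache_wb_range = pl310_wb_range;
}
#endif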
 1165 
 1166 /*
 1167  * Global constants also used by locore.s
 1168  */
 1169 
 1170 struct cpu_functions cpufuncs;
 1171 u_int cputype;
 1172 u_int cpu_reset_needs_v4_MMU_disable;   /* flag used in locore.s */
 1173 
 1174 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) ||  \
 1175   defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM1136) ||        \
 1176   defined(CPU_ARM1176) || defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||             \
 1177   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
 1178   defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) ||                 \
 1179   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
 1180   defined(CPU_CORTEXA)
 1181 
 1182 static void get_cachetype_cp15(void);
 1183 
 1184 /* Additional cache information local to this file.  Log2 of some of the
 1185    above numbers.  */
 1186 static int      arm_dcache_l2_nsets;
 1187 static int      arm_dcache_l2_assoc;
 1188 static int      arm_dcache_l2_linesize;
 1189 
 1190 static void
 1191 get_cachetype_cp15()
 1192 {
 1193         u_int ctype, isize, dsize, cpuid;
 1194         u_int clevel, csize, i, sel;
 1195         u_int multiplier;
 1196         u_char type;
 1197 
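               /* Read the CP15 cache type register (c0, c0, 1). */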
 1198         __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
 1199                 : "=r" (ctype));
 1200 
 1201         cpuid = cpufunc_id();
 1202         /*
 1203          * ...and thus spake the ARM ARM:
 1204          *
 1205          * If an <opcode2> value corresponding to an unimplemented or
 1206          * reserved ID register is encountered, the System Control
 1207          * processor returns the value of the main ID register.
 1208          */
 1209         if (ctype == cpuid)
 1210                 goto out;
 1211 
 1212         if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
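                       /*
                        * ARMv7: walk the cache hierarchy described by CLIDR,
                        * selecting each level and type via CSSELR and reading
                        * its geometry from CCSIDR.
                        */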
 1213                 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
 1214                     : "=r" (clevel));
 1215                 arm_cache_level = clevel;
 1216                 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
 1217                 i = 0;
 1218                 while ((type = (clevel & 0x7)) && i < 7) {
 1219                         if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
 1220                             type == CACHE_SEP_CACHE) {
 1221                                 sel = i << 1;
 1222                                 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
 1223                                     : : "r" (sel));
 1224                                 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
 1225                                     : "=r" (csize));
 1226                                 arm_cache_type[sel] = csize;
 1227                                 arm_dcache_align = 1 << 
 1228                                     (CPUV7_CT_xSIZE_LEN(csize) + 4);
 1229                                 arm_dcache_align_mask = arm_dcache_align - 1;
 1230                         }
 1231                         if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
 1232                                 sel = (i << 1) | 1;
 1233                                 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
 1234                                     : : "r" (sel));
 1235                                 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
 1236                                     : "=r" (csize));
 1237                                 arm_cache_type[sel] = csize;
 1238                         }
 1239                         i++;
 1240                         clevel >>= 3;
 1241                 }
 1242         } else {
 1243                 if ((ctype & CPU_CT_S) == 0)
 1244                         arm_pcache_unified = 1;
 1245 
 1246                 /*
 1247                  * If you want to know how this code works, go read the ARM ARM.
 1248                  */
 1249 
 1250                 arm_pcache_type = CPU_CT_CTYPE(ctype);
 1251 
 1252                 if (arm_pcache_unified == 0) {
 1253                         isize = CPU_CT_ISIZE(ctype);
 1254                         multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
 1255                         arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
 1256                         if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
 1257                                 if (isize & CPU_CT_xSIZE_M)
 1258                                         arm_picache_line_size = 0; /* not present */
 1259                                 else
 1260                                         arm_picache_ways = 1;
 1261                         } else {
 1262                                 arm_picache_ways = multiplier <<
 1263                                     (CPU_CT_xSIZE_ASSOC(isize) - 1);
 1264                         }
 1265                         arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
 1266                 }
 1267 
 1268                 dsize = CPU_CT_DSIZE(ctype);
 1269                 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
 1270                 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
 1271                 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
 1272                         if (dsize & CPU_CT_xSIZE_M)
 1273                                 arm_pdcache_line_size = 0; /* not present */
 1274                         else
 1275                                 arm_pdcache_ways = 1;
 1276                 } else {
 1277                         arm_pdcache_ways = multiplier <<
 1278                             (CPU_CT_xSIZE_ASSOC(dsize) - 1);
 1279                 }
 1280                 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
 1281 
 1282                 arm_dcache_align = arm_pdcache_line_size;
 1283 
 1284                 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
 1285                 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
 1286                 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
 1287                     CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
 1288 
 1289         out:
 1290                 arm_dcache_align_mask = arm_dcache_align - 1;
 1291         }
 1292 }
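
       /*
        * Worked example of the pre-ARMv7 decode above (illustrative field
        * values, not read from any particular CPU): a 16 KB, 32-way data
        * cache with 32-byte lines has LEN = 2, M = 0 (multiplier 2),
        * ASSOC = 5 and SIZE = 5 in its D-size field, so the code computes
        *
        *      line size = 1 << (LEN + 3)   = 32 bytes
        *      ways      = 2 << (ASSOC - 1) = 32
        *      size      = 2 << (SIZE + 8)  = 16384 bytes
        *
        * and the log2 values become l2_linesize = 5, l2_assoc = 5 and
        * l2_nsets = 4, i.e. 16 sets (16 * 32 * 32 = 16384).
        */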
 1293 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
 1294 
 1295 #if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
 1296     defined(CPU_IXP12X0)
 1297 /* Cache information for CPUs without cache type registers. */
 1298 struct cachetab {
 1299         u_int32_t ct_cpuid;
 1300         int     ct_pcache_type;
 1301         int     ct_pcache_unified;
 1302         int     ct_pdcache_size;
 1303         int     ct_pdcache_line_size;
 1304         int     ct_pdcache_ways;
 1305         int     ct_picache_size;
 1306         int     ct_picache_line_size;
 1307         int     ct_picache_ways;
 1308 };
 1309 
 1310 struct cachetab cachetab[] = {
 1311     /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
 1312     /* XXX is this type right for SA-1? */
 1313     { CPU_ID_SA110,     CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
 1314     { CPU_ID_SA1100,    CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
 1315     { CPU_ID_SA1110,    CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
 1316     { CPU_ID_IXP1200,   CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
 1317     { 0, 0, 0, 0, 0, 0, 0, 0}
 1318 };
 1319 
 1320 static void get_cachetype_table(void);
 1321 
 1322 static void
 1323 get_cachetype_table()
 1324 {
 1325         int i;
 1326         u_int32_t cpuid = cpufunc_id();
 1327 
 1328         for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
 1329                 if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
 1330                         arm_pcache_type = cachetab[i].ct_pcache_type;
 1331                         arm_pcache_unified = cachetab[i].ct_pcache_unified;
 1332                         arm_pdcache_size = cachetab[i].ct_pdcache_size;
 1333                         arm_pdcache_line_size =
 1334                             cachetab[i].ct_pdcache_line_size;
 1335                         arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
 1336                         arm_picache_size = cachetab[i].ct_picache_size;
 1337                         arm_picache_line_size =
 1338                             cachetab[i].ct_picache_line_size;
 1339                         arm_picache_ways = cachetab[i].ct_picache_ways;
 1340                 }
 1341         }
 1342         arm_dcache_align = arm_pdcache_line_size;
 1343 
 1344         arm_dcache_align_mask = arm_dcache_align - 1;
 1345 }
 1346 
  1347 #endif /* SA110 || SA1100 || SA1110 || IXP12X0 */
 1348 
 1349 /*
 1350  * Cannot panic here as we may not have a console yet ...
 1351  */
 1352 
 1353 int
 1354 set_cpufuncs()
 1355 {
 1356         cputype = cpufunc_id();
 1357         cputype &= CPU_ID_CPU_MASK;
 1358 
 1359         /*
 1360          * NOTE: cpu_do_powersave defaults to off.  If we encounter a
 1361          * CPU type where we want to use it by default, then we set it.
 1362          */
 1363 
 1364 #ifdef CPU_ARM7TDMI
 1365         if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
 1366             CPU_ID_IS7(cputype) &&
 1367             (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
 1368                 cpufuncs = arm7tdmi_cpufuncs;
 1369                 cpu_reset_needs_v4_MMU_disable = 0;
 1370                 get_cachetype_cp15();
 1371                 pmap_pte_init_generic();
 1372                 goto out;
 1373         }
 1374 #endif  
 1375 #ifdef CPU_ARM8
 1376         if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
 1377             (cputype & 0x0000f000) == 0x00008000) {
 1378                 cpufuncs = arm8_cpufuncs;
 1379                 cpu_reset_needs_v4_MMU_disable = 0;     /* XXX correct? */
 1380                 get_cachetype_cp15();
 1381                 pmap_pte_init_arm8();
 1382                 goto out;
 1383         }
 1384 #endif  /* CPU_ARM8 */
 1385 #ifdef CPU_ARM9
 1386         if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
 1387              (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
 1388             (cputype & 0x0000f000) == 0x00009000) {
 1389                 cpufuncs = arm9_cpufuncs;
 1390                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
 1391                 get_cachetype_cp15();
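                       /*
                        * Precompute the set/way loop parameters used by the
                        * ARM9 D-cache maintenance routines: sets_inc steps
                        * the set index by one cache line, sets_max is the
                        * last set offset, index_inc steps the way number
                        * held in the top bits of the cache-op operand, and
                        * index_max is the last way before wrapping to zero.
                        */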
 1392                 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
 1393                 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
 1394                     arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
 1395                 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
 1396                 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
 1397 #ifdef ARM9_CACHE_WRITE_THROUGH
 1398                 pmap_pte_init_arm9();
 1399 #else
 1400                 pmap_pte_init_generic();
 1401 #endif
 1402                 goto out;
 1403         }
 1404 #endif /* CPU_ARM9 */
 1405 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
 1406         if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
 1407             cputype == CPU_ID_MV88FR571_41) {
 1408                 uint32_t sheeva_ctrl;
 1409 
 1410                 sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
 1411                     MV_L2_ENABLE);
 1412                 /*
  1413                  * Workaround for Marvell MV78100 CPU: the cache
  1414                  * prefetch mechanism may break cache coherency, so it
  1415                  * needs to be disabled.
 1416                  *
 1417                  * Refer to errata document MV-S501058-00C.pdf (p. 3.1
 1418                  * L2 Prefetching Mechanism) for details.
 1419                  */
 1420                 if (cputype == CPU_ID_MV88FR571_VD ||
 1421                     cputype == CPU_ID_MV88FR571_41)
 1422                         sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
 1423 
 1424                 sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
 1425 
 1426                 cpufuncs = sheeva_cpufuncs;
 1427                 get_cachetype_cp15();
 1428                 pmap_pte_init_generic();
 1429                 goto out;
 1430         } else if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS) {
 1431                 cpufuncs = armv5_ec_cpufuncs;
 1432                 get_cachetype_cp15();
 1433                 pmap_pte_init_generic();
 1434                 goto out;
 1435         }
 1436 #endif /* CPU_ARM9E || CPU_ARM10 */
 1437 #ifdef CPU_ARM10
 1438         if (/* cputype == CPU_ID_ARM1020T || */
 1439             cputype == CPU_ID_ARM1020E) {
 1440                 /*
  1441                  * Select write-through caching (this isn't really an
 1442                  * option on ARM1020T).
 1443                  */
 1444                 cpufuncs = arm10_cpufuncs;
 1445                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
 1446                 get_cachetype_cp15();
 1447                 arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
 1448                 arm10_dcache_sets_max =
 1449                     (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
 1450                     arm10_dcache_sets_inc;
 1451                 arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
 1452                 arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
 1453                 pmap_pte_init_generic();
 1454                 goto out;
 1455         }
 1456 #endif /* CPU_ARM10 */
 1457 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
 1458         if (cputype == CPU_ID_ARM1136JS
 1459             || cputype == CPU_ID_ARM1136JSR1
 1460             || cputype == CPU_ID_ARM1176JZS) {
 1461 #ifdef CPU_ARM1136
 1462                 if (cputype == CPU_ID_ARM1136JS
 1463                     || cputype == CPU_ID_ARM1136JSR1)
 1464                         cpufuncs = arm1136_cpufuncs;
 1465 #endif
 1466 #ifdef CPU_ARM1176
 1467                 if (cputype == CPU_ID_ARM1176JZS)
 1468                         cpufuncs = arm1176_cpufuncs;
 1469 #endif
 1470                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
 1471                 get_cachetype_cp15();
 1472 
 1473                 pmap_pte_init_mmu_v6();
 1474 
 1475                 goto out;
 1476         }
 1477 #endif /* CPU_ARM1136 || CPU_ARM1176 */
 1478 #ifdef CPU_CORTEXA
 1479         if (cputype == CPU_ID_CORTEXA7 ||
 1480             cputype == CPU_ID_CORTEXA8R1 ||
 1481             cputype == CPU_ID_CORTEXA8R2 ||
 1482             cputype == CPU_ID_CORTEXA8R3 ||
 1483             cputype == CPU_ID_CORTEXA9R1 ||
 1484             cputype == CPU_ID_CORTEXA9R2 ||
 1485             cputype == CPU_ID_CORTEXA9R3 ||
 1486             cputype == CPU_ID_CORTEXA15 ) {
 1487                 cpufuncs = cortexa_cpufuncs;
 1488                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
 1489                 get_cachetype_cp15();
 1490                 
 1491                 pmap_pte_init_mmu_v6();
 1492                 /* Use powersave on this CPU. */
 1493                 cpu_do_powersave = 1;
 1494                 goto out;
 1495         }
 1496 #endif /* CPU_CORTEXA */
 1497                 
 1498 #if defined(CPU_MV_PJ4B)
 1499         if (cputype == CPU_ID_MV88SV581X_V6 ||
 1500             cputype == CPU_ID_MV88SV581X_V7 ||
 1501             cputype == CPU_ID_MV88SV584X_V7 ||
 1502             cputype == CPU_ID_ARM_88SV581X_V6 ||
 1503             cputype == CPU_ID_ARM_88SV581X_V7) {
 1504                 if (cpu_pfr(0) & ARM_PFR0_THUMBEE_MASK)
 1505                         cpufuncs = pj4bv7_cpufuncs;
 1506                 else
 1507                         cpufuncs = pj4bv6_cpufuncs;
 1508 
 1509                 get_cachetype_cp15();
 1510                 pmap_pte_init_mmu_v6();
 1511                 goto out;
 1512         } else if (cputype == CPU_ID_ARM_88SV584X_V6 ||
 1513             cputype == CPU_ID_MV88SV584X_V6) {
 1514                 cpufuncs = pj4bv6_cpufuncs;
 1515                 get_cachetype_cp15();
 1516                 pmap_pte_init_mmu_v6();
 1517                 goto out;
 1518         }
 1519 
 1520 #endif /* CPU_MV_PJ4B */
 1521 #ifdef CPU_SA110
 1522         if (cputype == CPU_ID_SA110) {
 1523                 cpufuncs = sa110_cpufuncs;
 1524                 cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it */
 1525                 get_cachetype_table();
 1526                 pmap_pte_init_sa1();
 1527                 goto out;
 1528         }
 1529 #endif  /* CPU_SA110 */
 1530 #ifdef CPU_SA1100
 1531         if (cputype == CPU_ID_SA1100) {
 1532                 cpufuncs = sa11x0_cpufuncs;
 1533                 cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it  */
 1534                 get_cachetype_table();
 1535                 pmap_pte_init_sa1();
 1536                 /* Use powersave on this CPU. */
 1537                 cpu_do_powersave = 1;
 1538 
 1539                 goto out;
 1540         }
 1541 #endif  /* CPU_SA1100 */
 1542 #ifdef CPU_SA1110
 1543         if (cputype == CPU_ID_SA1110) {
 1544                 cpufuncs = sa11x0_cpufuncs;
 1545                 cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it  */
 1546                 get_cachetype_table();
 1547                 pmap_pte_init_sa1();
 1548                 /* Use powersave on this CPU. */
 1549                 cpu_do_powersave = 1;
 1550 
 1551                 goto out;
 1552         }
 1553 #endif  /* CPU_SA1110 */
 1554 #if defined(CPU_FA526) || defined(CPU_FA626TE)
 1555         if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
 1556                 cpufuncs = fa526_cpufuncs;
 1557                 cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it  */
 1558                 get_cachetype_cp15();
 1559                 pmap_pte_init_generic();
 1560 
 1561                 /* Use powersave on this CPU. */
 1562                 cpu_do_powersave = 1;
 1563 
 1564                 goto out;
 1565         }
 1566 #endif  /* CPU_FA526 || CPU_FA626TE */
 1567 #ifdef CPU_IXP12X0
 1568         if (cputype == CPU_ID_IXP1200) {
 1569                 cpufuncs = ixp12x0_cpufuncs;
 1570                 cpu_reset_needs_v4_MMU_disable = 1;
 1571                 get_cachetype_table();
 1572                 pmap_pte_init_sa1();
 1573                 goto out;
 1574         }
 1575 #endif  /* CPU_IXP12X0 */
 1576 #ifdef CPU_XSCALE_80200
 1577         if (cputype == CPU_ID_80200) {
 1578                 int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
 1579 
 1580                 i80200_icu_init();
 1581 
 1582 #if defined(XSCALE_CCLKCFG)
 1583                 /*
 1584                  * Crank CCLKCFG to maximum legal value.
 1585                  */
 1586                 __asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
 1587                         :
 1588                         : "r" (XSCALE_CCLKCFG));
 1589 #endif
 1590 
 1591                 /*
 1592                  * XXX Disable ECC in the Bus Controller Unit; we
 1593                  * don't really support it, yet.  Clear any pending
 1594                  * error indications.
 1595                  */
 1596                 __asm __volatile("mcr p13, 0, %0, c0, c1, 0"
 1597                         :
 1598                         : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
 1599 
 1600                 cpufuncs = xscale_cpufuncs;
 1601                 /*
 1602                  * i80200 errata: Step-A0 and A1 have a bug where
 1603                  * D$ dirty bits are not cleared on "invalidate by
 1604                  * address".
 1605                  *
 1606                  * Workaround: Clean cache line before invalidating.
 1607                  */
 1608                 if (rev == 0 || rev == 1)
 1609                         cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
 1610 
 1611                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
 1612                 get_cachetype_cp15();
 1613                 pmap_pte_init_xscale();
 1614                 goto out;
 1615         }
 1616 #endif /* CPU_XSCALE_80200 */
 1617 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
 1618         if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
 1619             cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
 1620             cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
 1621                 cpufuncs = xscale_cpufuncs;
 1622                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
 1623                 get_cachetype_cp15();
 1624                 pmap_pte_init_xscale();
 1625                 goto out;
 1626         }
 1627 #endif /* CPU_XSCALE_80321 */
 1628 
 1629 #if defined(CPU_XSCALE_81342)
 1630         if (cputype == CPU_ID_81342) {
 1631                 cpufuncs = xscalec3_cpufuncs;
 1632                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
 1633                 get_cachetype_cp15();
 1634                 pmap_pte_init_xscale();
 1635                 goto out;
 1636         }
 1637 #endif /* CPU_XSCALE_81342 */
 1638 #ifdef CPU_XSCALE_PXA2X0
 1639         /* ignore core revision to test PXA2xx CPUs */
 1640         if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
 1641             (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
 1642             (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
 1643 
 1644                 cpufuncs = xscale_cpufuncs;
 1645                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
 1646                 get_cachetype_cp15();
 1647                 pmap_pte_init_xscale();
 1648 
 1649                 /* Use powersave on this CPU. */
 1650                 cpu_do_powersave = 1;
 1651 
 1652                 goto out;
 1653         }
 1654 #endif /* CPU_XSCALE_PXA2X0 */
 1655 #ifdef CPU_XSCALE_IXP425
 1656         if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
 1657             cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
 1658 
 1659                 cpufuncs = xscale_cpufuncs;
 1660                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
 1661                 get_cachetype_cp15();
 1662                 pmap_pte_init_xscale();
 1663 
 1664                 goto out;
 1665         }
 1666 #endif /* CPU_XSCALE_IXP425 */
 1667         /*
 1668          * Bzzzz. And the answer was ...
 1669          */
 1670         panic("No support for this CPU type (%08x) in kernel", cputype);
 1671         return(ARCHITECTURE_NOT_PRESENT);
 1672 out:
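               /*
                * Publish the D-cache line mask to UMA so that allocations
                * requesting cache-line alignment match this CPU's line size.
                */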
 1673         uma_set_align(arm_dcache_align_mask);
 1674         return (0);
 1675 }
 1676 
 1677 /*
 1678  * Fixup routines for data and prefetch aborts.
 1679  *
  1680  * Several compile-time symbols are used:
  1681  *
  1682  * DEBUG_FAULT_CORRECTION - Print debugging information during the
  1683  * correction of registers after a fault.
  1684  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts;
  1685  * when defined, late aborts are used.
 1686  */
 1687 
 1688 
 1689 /*
 1690  * Null abort fixup routine.
 1691  * For use when no fixup is required.
 1692  */
 1693 int
 1694 cpufunc_null_fixup(arg)
 1695         void *arg;
 1696 {
 1697         return(ABORT_FIXUP_OK);
 1698 }
 1699 
 1700 
 1701 #if defined(CPU_ARM7TDMI)
 1702 
 1703 #ifdef DEBUG_FAULT_CORRECTION
 1704 #define DFC_PRINTF(x)           printf x
 1705 #define DFC_DISASSEMBLE(x)      disassemble(x)
 1706 #else
 1707 #define DFC_PRINTF(x)           /* nothing */
 1708 #define DFC_DISASSEMBLE(x)      /* nothing */
 1709 #endif
 1710 
 1711 /*
 1712  * "Early" data abort fixup.
 1713  *
 1714  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
 1715  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
 1716  *
 1717  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
 1718  */
 1719 int
 1720 early_abort_fixup(arg)
 1721         void *arg;
 1722 {
 1723         trapframe_t *frame = arg;
 1724         u_int fault_pc;
 1725         u_int fault_instruction;
 1726         int saved_lr = 0;
 1727 
 1728         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
 1729 
 1730                 /* Ok an abort in SVC mode */
 1731 
 1732                 /*
 1733                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
 1734                  * as the fault happened in svc mode but we need it in the
 1735                  * usr slot so we can treat the registers as an array of ints
 1736                  * during fixing.
  1737                  * NOTE: The PC (r15) occupies a slot in this array, but
  1738                  * writeback to r15 is not allowed.
 1739                  * Doing it like this is more efficient than trapping this
 1740                  * case in all possible locations in the following fixup code.
 1741                  */
 1742 
 1743                 saved_lr = frame->tf_usr_lr;
 1744                 frame->tf_usr_lr = frame->tf_svc_lr;
 1745 
 1746                 /*
 1747                  * Note the trapframe does not have the SVC r13 so a fault
 1748                  * from an instruction with writeback to r13 in SVC mode is
 1749                  * not allowed. This should not happen as the kstack is
 1750                  * always valid.
 1751                  */
 1752         }
 1753 
 1754         /* Get fault address and status from the CPU */
 1755 
 1756         fault_pc = frame->tf_pc;
 1757         fault_instruction = *((volatile unsigned int *)fault_pc);
 1758 
 1759         /* Decode the fault instruction and fix the registers as needed */
 1760 
 1761         if ((fault_instruction & 0x0e000000) == 0x08000000) {
 1762                 int base;
 1763                 int loop;
 1764                 int count;
 1765                 int *registers = &frame->tf_r0;
 1766 
 1767                 DFC_PRINTF(("LDM/STM\n"));
 1768                 DFC_DISASSEMBLE(fault_pc);
 1769                 if (fault_instruction & (1 << 21)) {
 1770                         DFC_PRINTF(("This instruction must be corrected\n"));
 1771                         base = (fault_instruction >> 16) & 0x0f;
 1772                         if (base == 15)
 1773                                 return ABORT_FIXUP_FAILED;
 1774                         /* Count registers transferred */
 1775                         count = 0;
 1776                         for (loop = 0; loop < 16; ++loop) {
 1777                                 if (fault_instruction & (1<<loop))
 1778                                         ++count;
 1779                         }
 1780                         DFC_PRINTF(("%d registers used\n", count));
 1781                         DFC_PRINTF(("Corrected r%d by %d bytes ",
 1782                                        base, count * 4));
 1783                         if (fault_instruction & (1 << 23)) {
 1784                                 DFC_PRINTF(("down\n"));
 1785                                 registers[base] -= count * 4;
 1786                         } else {
 1787                                 DFC_PRINTF(("up\n"));
 1788                                 registers[base] += count * 4;
 1789                         }
 1790                 }
 1791         } else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
 1792                 int base;
 1793                 int offset;
 1794                 int *registers = &frame->tf_r0;
 1795         
 1796                 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
 1797 
 1798                 DFC_DISASSEMBLE(fault_pc);
 1799 
 1800                 /* Only need to fix registers if write back is turned on */
 1801 
 1802                 if ((fault_instruction & (1 << 21)) != 0) {
 1803                         base = (fault_instruction >> 16) & 0x0f;
 1804                         if (base == 13 &&
 1805                             (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
 1806                                 return ABORT_FIXUP_FAILED;
 1807                         if (base == 15)
 1808                                 return ABORT_FIXUP_FAILED;
 1809 
 1810                         offset = (fault_instruction & 0xff) << 2;
 1811                         DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
 1812                         if ((fault_instruction & (1 << 23)) != 0)
 1813                                 offset = -offset;
 1814                         registers[base] += offset;
 1815                         DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
 1816                 }
 1817         } else if ((fault_instruction & 0x0e000000) == 0x0c000000)
 1818                 return ABORT_FIXUP_FAILED;
 1819 
 1820         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
 1821 
 1822                 /* Ok an abort in SVC mode */
 1823 
 1824                 /*
 1825                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
 1826                  * as the fault happened in svc mode but we need it in the
 1827                  * usr slot so we can treat the registers as an array of ints
 1828                  * during fixing.
  1829                  * NOTE: The PC (r15) occupies a slot in this array, but
  1830                  * writeback to r15 is not allowed.
 1831                  * Doing it like this is more efficient than trapping this
 1832                  * case in all possible locations in the prior fixup code.
 1833                  */
 1834 
 1835                 frame->tf_svc_lr = frame->tf_usr_lr;
 1836                 frame->tf_usr_lr = saved_lr;
 1837 
 1838                 /*
 1839                  * Note the trapframe does not have the SVC r13 so a fault
 1840                  * from an instruction with writeback to r13 in SVC mode is
 1841                  * not allowed. This should not happen as the kstack is
 1842                  * always valid.
 1843                  */
 1844         }
 1845 
 1846         return(ABORT_FIXUP_OK);
 1847 }
  1848 #endif  /* CPU_ARM7TDMI */
 1849 
 1850 
 1851 #if defined(CPU_ARM7TDMI)
 1852 /*
 1853  * "Late" (base updated) data abort fixup
 1854  *
 1855  * For ARM6 (in late-abort mode) and ARM7.
 1856  *
 1857  * In this model, all data-transfer instructions need fixing up.  We defer
 1858  * LDM, STM, LDC and STC fixup to the early-abort handler.
 1859  */
 1860 int
 1861 late_abort_fixup(arg)
 1862         void *arg;
 1863 {
 1864         trapframe_t *frame = arg;
 1865         u_int fault_pc;
 1866         u_int fault_instruction;
 1867         int saved_lr = 0;
 1868 
 1869         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
 1870 
 1871                 /* Ok an abort in SVC mode */
 1872 
 1873                 /*
 1874                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
 1875                  * as the fault happened in svc mode but we need it in the
 1876                  * usr slot so we can treat the registers as an array of ints
 1877                  * during fixing.
  1878                  * NOTE: The PC (r15) occupies a slot in this array, but
  1879                  * writeback to r15 is not allowed.
 1880                  * Doing it like this is more efficient than trapping this
 1881                  * case in all possible locations in the following fixup code.
 1882                  */
 1883 
 1884                 saved_lr = frame->tf_usr_lr;
 1885                 frame->tf_usr_lr = frame->tf_svc_lr;
 1886 
 1887                 /*
 1888                  * Note the trapframe does not have the SVC r13 so a fault
 1889                  * from an instruction with writeback to r13 in SVC mode is
 1890                  * not allowed. This should not happen as the kstack is
 1891                  * always valid.
 1892                  */
 1893         }
 1894 
 1895         /* Get fault address and status from the CPU */
 1896 
 1897         fault_pc = frame->tf_pc;
 1898         fault_instruction = *((volatile unsigned int *)fault_pc);
 1899 
 1900         /* Decode the fault instruction and fix the registers as needed */
 1901 
  1902         /* Was it a swap instruction? */
 1903 
 1904         if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
 1905                 DFC_DISASSEMBLE(fault_pc);
 1906         } else if ((fault_instruction & 0x0c000000) == 0x04000000) {
 1907 
  1908                 /* Was it a ldr/str instruction? */
 1909                 /* This is for late abort only */
 1910 
 1911                 int base;
 1912                 int offset;
 1913                 int *registers = &frame->tf_r0;
 1914 
 1915                 DFC_DISASSEMBLE(fault_pc);
 1916                 
 1917                 /* This is for late abort only */
 1918 
 1919                 if ((fault_instruction & (1 << 24)) == 0
 1920                     || (fault_instruction & (1 << 21)) != 0) {  
  1921                         /* post-indexed ldr/str, or pre-indexed with writeback */
 1922 
 1923                         base = (fault_instruction >> 16) & 0x0f;
 1924                         if (base == 13 &&
 1925                             (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
 1926                                 return ABORT_FIXUP_FAILED;
 1927                         if (base == 15)
 1928                                 return ABORT_FIXUP_FAILED;
 1929                         DFC_PRINTF(("late abt fix: r%d=%08x : ",
 1930                                        base, registers[base]));
 1931                         if ((fault_instruction & (1 << 25)) == 0) {
 1932                                 /* Immediate offset - easy */
 1933 
 1934                                 offset = fault_instruction & 0xfff;
 1935                                 if ((fault_instruction & (1 << 23)))
 1936                                         offset = -offset;
 1937                                 registers[base] += offset;
 1938                                 DFC_PRINTF(("imm=%08x ", offset));
 1939                         } else {
 1940                                 /* offset is a shifted register */
 1941                                 int shift;
 1942 
 1943                                 offset = fault_instruction & 0x0f;
 1944                                 if (offset == base)
 1945                                         return ABORT_FIXUP_FAILED;
 1946 
 1947                                 /*
  1948                                  * Register offset - harder: we have to
  1949                                  * cope with shifts!
 1950                                  */
 1951                                 offset = registers[offset];
 1952 
 1953                                 if ((fault_instruction & (1 << 4)) == 0)
 1954                                         /* shift with amount */
 1955                                         shift = (fault_instruction >> 7) & 0x1f;
 1956                                 else {
 1957                                         /* shift with register */
 1958                                         if ((fault_instruction & (1 << 7)) != 0)
 1959                                                 /* undefined for now so bail out */
 1960                                                 return ABORT_FIXUP_FAILED;
 1961                                         shift = ((fault_instruction >> 8) & 0xf);
 1962                                         if (base == shift)
 1963                                                 return ABORT_FIXUP_FAILED;
 1964                                         DFC_PRINTF(("shift reg=%d ", shift));
 1965                                         shift = registers[shift];
 1966                                 }
 1967                                 DFC_PRINTF(("shift=%08x ", shift));
 1968                                 switch (((fault_instruction >> 5) & 0x3)) {
 1969                                 case 0 : /* Logical left */
 1970                                         offset = (int)(((u_int)offset) << shift);
 1971                                         break;
 1972                                 case 1 : /* Logical Right */
 1973                                         if (shift == 0) shift = 32;
 1974                                         offset = (int)(((u_int)offset) >> shift);
 1975                                         break;
 1976                                 case 2 : /* Arithmetic Right */
 1977                                         if (shift == 0) shift = 32;
 1978                                         offset = (int)(((int)offset) >> shift);
 1979                                         break;
 1980                                 case 3 : /* Rotate right (rol or rxx) */
 1981                                         return ABORT_FIXUP_FAILED;
 1982                                         break;
 1983                                 }
 1984 
 1985                                 DFC_PRINTF(("abt: fixed LDR/STR with "
 1986                                                "register offset\n"));
 1987                                 if ((fault_instruction & (1 << 23)))
 1988                                         offset = -offset;
 1989                                 DFC_PRINTF(("offset=%08x ", offset));
 1990                                 registers[base] += offset;
 1991                         }
 1992                         DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
 1993                 }
 1994         }
 1995 
 1996         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
 1997 
 1998                 /* Ok an abort in SVC mode */
 1999 
 2000                 /*
 2001                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
 2002                  * as the fault happened in svc mode but we need it in the
 2003                  * usr slot so we can treat the registers as an array of ints
 2004                  * during fixing.
  2005                  * NOTE: The PC (r15) occupies a slot in this array, but
  2006                  * writeback to r15 is not allowed.
 2007                  * Doing it like this is more efficient than trapping this
 2008                  * case in all possible locations in the prior fixup code.
 2009                  */
 2010 
 2011                 frame->tf_svc_lr = frame->tf_usr_lr;
 2012                 frame->tf_usr_lr = saved_lr;
 2013 
 2014                 /*
 2015                  * Note the trapframe does not have the SVC r13 so a fault
 2016                  * from an instruction with writeback to r13 in SVC mode is
 2017                  * not allowed. This should not happen as the kstack is
 2018                  * always valid.
 2019                  */
 2020         }
 2021 
 2022         /*
 2023          * Now let the early-abort fixup routine have a go, in case it
 2024          * was an LDM, STM, LDC or STC that faulted.
 2025          */
 2026 
 2027         return early_abort_fixup(arg);
 2028 }
 2029 #endif  /* CPU_ARM7TDMI */
 2030 
 2031 /*
 2032  * CPU Setup code
 2033  */
 2034 
 2035 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
 2036   defined(CPU_ARM9E) || \
 2037   defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||   \
 2038   defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||             \
 2039   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
 2040   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
 2041   defined(CPU_ARM10) ||  defined(CPU_ARM1136) || defined(CPU_ARM1176) ||\
 2042   defined(CPU_FA526) || defined(CPU_FA626TE)
 2043 
 2044 #define IGN     0
 2045 #define OR      1
 2046 #define BIC     2
 2047 
 2048 struct cpu_option {
 2049         char    *co_name;
 2050         int     co_falseop;
 2051         int     co_trueop;
 2052         int     co_value;
 2053 };
 2054 
 2055 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
 2056 
 2057 static u_int
 2058 parse_cpu_options(args, optlist, cpuctrl)
 2059         char *args;
 2060         struct cpu_option *optlist;
 2061         u_int cpuctrl;
 2062 {
 2063         int integer;
 2064 
 2065         if (args == NULL)
 2066                 return(cpuctrl);
 2067 
 2068         while (optlist->co_name) {
 2069                 if (get_bootconf_option(args, optlist->co_name,
 2070                     BOOTOPT_TYPE_BOOLEAN, &integer)) {
 2071                         if (integer) {
 2072                                 if (optlist->co_trueop == OR)
 2073                                         cpuctrl |= optlist->co_value;
 2074                                 else if (optlist->co_trueop == BIC)
 2075                                         cpuctrl &= ~optlist->co_value;
 2076                         } else {
 2077                                 if (optlist->co_falseop == OR)
 2078                                         cpuctrl |= optlist->co_value;
 2079                                 else if (optlist->co_falseop == BIC)
 2080                                         cpuctrl &= ~optlist->co_value;
 2081                         }
 2082                 }
 2083                 ++optlist;
 2084         }
 2085         return(cpuctrl);
 2086 }
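
       /*
        * Worked example (illustrative): with the arm9_options table below,
        * a boot argument that sets "cpu.nocache" true matches the entry
        * { "cpu.nocache", OR, BIC, IC|DC }, so co_trueop == BIC clears the
        * I/D-cache enable bits from cpuctrl; the same option set false takes
        * the co_falseop == OR path and sets them instead.  Options absent
        * from the boot arguments leave cpuctrl untouched.
        */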
 2087 #endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
 2088 
 2089 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
 2090 struct cpu_option arm678_options[] = {
 2091 #ifdef COMPAT_12
 2092         { "nocache",            IGN, BIC, CPU_CONTROL_IDC_ENABLE },
 2093         { "nowritebuf",         IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
 2094 #endif  /* COMPAT_12 */
 2095         { "cpu.cache",          BIC, OR,  CPU_CONTROL_IDC_ENABLE },
 2096         { "cpu.nocache",        OR,  BIC, CPU_CONTROL_IDC_ENABLE },
 2097         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 2098         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 2099         { NULL,                 IGN, IGN, 0 }
 2100 };
 2101 
  2102 #endif  /* CPU_ARM7TDMI || CPU_ARM8 */
 2103 
 2104 #ifdef CPU_ARM7TDMI
 2105 struct cpu_option arm7tdmi_options[] = {
 2106         { "arm7.cache",         BIC, OR,  CPU_CONTROL_IDC_ENABLE },
 2107         { "arm7.nocache",       OR,  BIC, CPU_CONTROL_IDC_ENABLE },
 2108         { "arm7.writebuf",      BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 2109         { "arm7.nowritebuf",    OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 2110 #ifdef COMPAT_12
 2111         { "fpaclk2",            BIC, OR,  CPU_CONTROL_CPCLK },
 2112 #endif  /* COMPAT_12 */
 2113         { "arm700.fpaclk",      BIC, OR,  CPU_CONTROL_CPCLK },
 2114         { NULL,                 IGN, IGN, 0 }
 2115 };
 2116 
 2117 void
 2118 arm7tdmi_setup(args)
 2119         char *args;
 2120 {
 2121         int cpuctrl;
 2122 
 2123         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2124                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2125                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
 2126 
 2127         cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
 2128         cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
 2129 
 2130 #ifdef __ARMEB__
 2131         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 2132 #endif
 2133 
 2134         /* Clear out the cache */
 2135         cpu_idcache_wbinv_all();
 2136 
 2137         /* Set the control register */
 2138         ctrl = cpuctrl;
 2139         cpu_control(0xffffffff, cpuctrl);
 2140 }
 2141 #endif  /* CPU_ARM7TDMI */
 2142 
 2143 #ifdef CPU_ARM8
 2144 struct cpu_option arm8_options[] = {
 2145         { "arm8.cache",         BIC, OR,  CPU_CONTROL_IDC_ENABLE },
 2146         { "arm8.nocache",       OR,  BIC, CPU_CONTROL_IDC_ENABLE },
 2147         { "arm8.writebuf",      BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 2148         { "arm8.nowritebuf",    OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 2149 #ifdef COMPAT_12
 2150         { "branchpredict",      BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 2151 #endif  /* COMPAT_12 */
 2152         { "cpu.branchpredict",  BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 2153         { "arm8.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 2154         { NULL,                 IGN, IGN, 0 }
 2155 };
 2156 
 2157 void
 2158 arm8_setup(args)
 2159         char *args;
 2160 {
 2161         int integer;
 2162         int cpuctrl, cpuctrlmask;
 2163         int clocktest;
 2164         int setclock = 0;
 2165 
 2166         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2167                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2168                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
 2169         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2170                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2171                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
 2172                  | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
 2173                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
 2174 
 2175 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 2176         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 2177 #endif
 2178 
 2179         cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
 2180         cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
 2181 
 2182 #ifdef __ARMEB__
 2183         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 2184 #endif
 2185 
 2186         /* Get clock configuration */
 2187         clocktest = arm8_clock_config(0, 0) & 0x0f;
 2188 
 2189         /* Special ARM8 clock and test configuration */
 2190         if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
 2191                 clocktest = 0;
 2192                 setclock = 1;
 2193         }
 2194         if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
 2195                 if (integer)
 2196                         clocktest |= 0x01;
 2197                 else
 2198                         clocktest &= ~(0x01);
 2199                 setclock = 1;
 2200         }
 2201         if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
 2202                 if (integer)
 2203                         clocktest |= 0x02;
 2204                 else
 2205                         clocktest &= ~(0x02);
 2206                 setclock = 1;
 2207         }
 2208         if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
 2209                 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
 2210                 setclock = 1;
 2211         }
 2212         if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
 2213                 clocktest |= (integer & 7) << 5;
 2214                 setclock = 1;
 2215         }
 2216         
 2217         /* Clear out the cache */
 2218         cpu_idcache_wbinv_all();
 2219 
 2220         /* Set the control register */
 2221         ctrl = cpuctrl;
 2222         cpu_control(0xffffffff, cpuctrl);
 2223 
 2224         /* Set the clock/test register */
 2225         if (setclock)
 2226                 arm8_clock_config(0x7f, clocktest);
 2227 }
 2228 #endif  /* CPU_ARM8 */
 2229 
 2230 #ifdef CPU_ARM9
 2231 struct cpu_option arm9_options[] = {
 2232         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2233         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2234         { "arm9.cache", BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2235         { "arm9.icache",        BIC, OR,  CPU_CONTROL_IC_ENABLE },
 2236         { "arm9.dcache",        BIC, OR,  CPU_CONTROL_DC_ENABLE },
 2237         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 2238         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 2239         { "arm9.writebuf",      BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 2240         { NULL,                 IGN, IGN, 0 }
 2241 };
 2242 
 2243 void
 2244 arm9_setup(args)
 2245         char *args;
 2246 {
 2247         int cpuctrl, cpuctrlmask;
 2248 
 2249         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2250             | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2251             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2252             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
 2253             CPU_CONTROL_ROUNDROBIN;
 2254         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2255                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2256                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2257                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 2258                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 2259                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
 2260                  | CPU_CONTROL_ROUNDROBIN;
 2261 
 2262 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 2263         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 2264 #endif
 2265 
 2266         cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
 2267 
 2268 #ifdef __ARMEB__
 2269         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 2270 #endif
 2271         if (vector_page == ARM_VECTORS_HIGH)
 2272                 cpuctrl |= CPU_CONTROL_VECRELOC;
 2273 
 2274         /* Clear out the cache */
 2275         cpu_idcache_wbinv_all();
 2276 
 2277         /* Set the control register */
 2278         cpu_control(cpuctrlmask, cpuctrl);
 2279         ctrl = cpuctrl;
 2280 
 2281 }
 2282 #endif  /* CPU_ARM9 */
 2283 
 2284 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
 2285 struct cpu_option arm10_options[] = {
 2286         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2287         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2288         { "arm10.cache",        BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2289         { "arm10.icache",       BIC, OR,  CPU_CONTROL_IC_ENABLE },
 2290         { "arm10.dcache",       BIC, OR,  CPU_CONTROL_DC_ENABLE },
 2291         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 2292         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 2293         { "arm10.writebuf",     BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 2294         { NULL,                 IGN, IGN, 0 }
 2295 };
 2296 
 2297 void
 2298 arm10_setup(args)
 2299         char *args;
 2300 {
 2301         int cpuctrl, cpuctrlmask;
 2302 
 2303         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
 2304             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2305             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
 2306         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
 2307             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2308             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 2309             | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 2310             | CPU_CONTROL_BPRD_ENABLE
 2311             | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
 2312 
 2313 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 2314         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 2315 #endif
 2316 
 2317         cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
 2318 
 2319 #ifdef __ARMEB__
 2320         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 2321 #endif
 2322 
 2323         /* Clear out the cache */
 2324         cpu_idcache_wbinv_all();
 2325 
 2326         /* Now really make sure they are clean.  */
 2327         __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
 2328 
 2329         if (vector_page == ARM_VECTORS_HIGH)
 2330                 cpuctrl |= CPU_CONTROL_VECRELOC;
 2331 
 2332         /* Set the control register */
 2333         ctrl = cpuctrl;
 2334         cpu_control(0xffffffff, cpuctrl);
 2335 
 2336         /* And again. */
 2337         cpu_idcache_wbinv_all();
 2338 }
 2339 #endif  /* CPU_ARM9E || CPU_ARM10 */
 2340 
 2341 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
 2342 struct cpu_option arm11_options[] = {
 2343         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2344         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2345         { "arm11.cache",        BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2346         { "arm11.icache",       BIC, OR,  CPU_CONTROL_IC_ENABLE },
 2347         { "arm11.dcache",       BIC, OR,  CPU_CONTROL_DC_ENABLE },
 2348         { NULL,                 IGN, IGN, 0 }
 2349 };
 2350 
 2351 void
 2352 arm11x6_setup(char *args)
 2353 {
 2354         int cpuctrl, cpuctrl_wax;
 2355         uint32_t auxctrl, auxctrl_wax;
 2356         uint32_t tmp, tmp2;
 2357         uint32_t sbz=0;
 2358         uint32_t cpuid;
 2359 
 2360         cpuid = cpufunc_id();
 2361 
 2362         cpuctrl =
 2363                 CPU_CONTROL_MMU_ENABLE  |
 2364                 CPU_CONTROL_DC_ENABLE   |
 2365                 CPU_CONTROL_WBUF_ENABLE |
 2366                 CPU_CONTROL_32BP_ENABLE |
 2367                 CPU_CONTROL_32BD_ENABLE |
 2368                 CPU_CONTROL_LABT_ENABLE |
 2369                 CPU_CONTROL_SYST_ENABLE |
 2370                 CPU_CONTROL_IC_ENABLE;
 2371 
 2372         /*
  2373          * "write as existing" bits: the inverse of this value is the
  2374          * mask of control-register bits we actually modify.
 2375          */
 2376         cpuctrl_wax =
 2377                 (3 << 30) | /* SBZ */
 2378                 (1 << 29) | /* FA */
 2379                 (1 << 28) | /* TR */
 2380                 (3 << 26) | /* SBZ */ 
 2381                 (3 << 19) | /* SBZ */
 2382                 (1 << 17);  /* SBZ */
 2383 
 2384         cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
 2385         cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
 2386 
 2387         cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
 2388 
 2389 #ifdef __ARMEB__
 2390         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 2391 #endif
 2392 
 2393         if (vector_page == ARM_VECTORS_HIGH)
 2394                 cpuctrl |= CPU_CONTROL_VECRELOC;
 2395 
 2396         auxctrl = 0;
 2397         auxctrl_wax = ~0;
 2398         /*
  2399          * This option enables the workaround for the 364296 ARM1136
 2400          * r0pX errata (possible cache data corruption with
 2401          * hit-under-miss enabled). It sets the undocumented bit 31 in
 2402          * the auxiliary control register and the FI bit in the control
 2403          * register, thus disabling hit-under-miss without putting the
 2404          * processor into full low interrupt latency mode. ARM11MPCore
 2405          * is not affected.
 2406          */
 2407         if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
 2408                 cpuctrl |= CPU_CONTROL_FI_ENABLE;
 2409                 auxctrl = ARM1136_AUXCTL_PFI;
 2410                 auxctrl_wax = ~ARM1136_AUXCTL_PFI;
 2411         }
 2412 
 2413         /*
  2414          * Enable an erratum workaround
 2415          */
 2416         if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
 2417                 auxctrl = ARM1176_AUXCTL_PHD;
 2418                 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
 2419         }
 2420 
 2421         /* Clear out the cache */
 2422         cpu_idcache_wbinv_all();
 2423 
 2424         /* Now really make sure they are clean.  */
 2425         __asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
 2426 
 2427         /* Allow detection code to find the VFP if it's fitted.  */
 2428         __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
 2429 
 2430         /* Set the control register */
 2431         ctrl = cpuctrl;
 2432         cpu_control(~cpuctrl_wax, cpuctrl);
 2433 
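               /*
                * Read-modify-write the auxiliary control register (CP15 c1,
                * c0, 1): preserve the bits selected by auxctrl_wax, merge in
                * auxctrl, and write the register back only if it changed.
                */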
 2434         __asm volatile ("mrc    p15, 0, %0, c1, c0, 1\n\t"
 2435                         "and    %1, %0, %2\n\t"
 2436                         "orr    %1, %1, %3\n\t"
 2437                         "teq    %0, %1\n\t"
 2438                         "mcrne  p15, 0, %1, c1, c0, 1\n\t"
 2439                         : "=r"(tmp), "=r"(tmp2) :
 2440                           "r"(auxctrl_wax), "r"(auxctrl));
 2441 
 2442         /* And again. */
 2443         cpu_idcache_wbinv_all();
 2444 }
 2445 #endif  /* CPU_ARM1136 || CPU_ARM1176 */
 2446 
 2447 #ifdef CPU_MV_PJ4B
 2448 void
 2449 pj4bv6_setup(char *args)
 2450 {
 2451         int cpuctrl;
 2452 
 2453         pj4b_config();
 2454 
 2455         cpuctrl = CPU_CONTROL_MMU_ENABLE;
 2456 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 2457         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 2458 #endif
 2459         cpuctrl |= CPU_CONTROL_DC_ENABLE;
 2460         cpuctrl |= (0xf << 3);
 2461 #ifdef __ARMEB__
 2462         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 2463 #endif
 2464         cpuctrl |= CPU_CONTROL_SYST_ENABLE;
 2465         cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
 2466         cpuctrl |= CPU_CONTROL_IC_ENABLE;
 2467         if (vector_page == ARM_VECTORS_HIGH)
 2468                 cpuctrl |= CPU_CONTROL_VECRELOC;
 2469         cpuctrl |= (0x5 << 16);
 2470         cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
 2471         /* XXX not yet */
 2472         /* cpuctrl |= CPU_CONTROL_L2_ENABLE; */
 2473 
 2474         /* Make sure caches are clean.  */
 2475         cpu_idcache_wbinv_all();
 2476         cpu_l2cache_wbinv_all();
 2477 
 2478         /* Set the control register */
 2479         ctrl = cpuctrl;
 2480         cpu_control(0xffffffff, cpuctrl);
 2481 
 2482         cpu_idcache_wbinv_all();
 2483         cpu_l2cache_wbinv_all();
 2484 }
 2485 
 2486 void
 2487 pj4bv7_setup(char *args)
 2489 {
 2490         int cpuctrl;
 2491 
 2492         pj4b_config();
 2493 
 2494         cpuctrl = CPU_CONTROL_MMU_ENABLE;
 2495 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 2496         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 2497 #endif
 2498         cpuctrl |= CPU_CONTROL_DC_ENABLE;
 2499         cpuctrl |= (0xf << 3);
 2500         cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
 2501         cpuctrl |= CPU_CONTROL_IC_ENABLE;
 2502         if (vector_page == ARM_VECTORS_HIGH)
 2503                 cpuctrl |= CPU_CONTROL_VECRELOC;
 2504         cpuctrl |= (0x5 << 16) | (1 << 22);
 2505         cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
 2506 
 2507         /* Clear out the cache */
 2508         cpu_idcache_wbinv_all();
 2509 
 2510         /* Set the control register */
 2511         ctrl = cpuctrl;
 2512         cpu_control(0xFFFFFFFF, cpuctrl);
 2513 
 2514         /* And again. */
 2515         cpu_idcache_wbinv_all();
 2516 }
 2517 #endif /* CPU_MV_PJ4B */
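
Note the different first arguments passed to cpu_control() in this file: the PJ4B routines pass 0xffffffff, while the ARM1136/ARM1176 code above passes ~cpuctrl_wax. cpu_control() itself is implemented in assembly elsewhere in the tree; the contract the call sites rely on (stated here as an assumption, not a quote of that code) is that bits named in the first argument may be changed, bits in the second argument end up set, and everything else is preserved. A compact sketch of that effect:

    #include <stdint.h>

    /*
     * Sketch of the effect the call sites assume from
     * cpu_control(mask, value): with mask == 0xffffffff the control
     * register is simply replaced by 'value'; with mask == ~cpuctrl_wax
     * the "write-as-existing" bits keep their previous contents.
     */
    static uint32_t
    cpu_control_effect(uint32_t old_sctlr, uint32_t mask, uint32_t value)
    {
            return ((old_sctlr & ~mask) | value);
    }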
 2518 
 2519 #ifdef CPU_CORTEXA
 2520 
 2521 void
 2522 cortexa_setup(char *args)
 2523 {
 2524         int cpuctrl, cpuctrlmask;
 2525         
 2526         cpuctrlmask = CPU_CONTROL_MMU_ENABLE |     /* MMU enable         [0] */
 2527             CPU_CONTROL_AFLT_ENABLE |    /* Alignment fault    [1] */
 2528             CPU_CONTROL_DC_ENABLE |      /* DCache enable      [2] */
 2529             CPU_CONTROL_BPRD_ENABLE |    /* Branch prediction [11] */
 2530             CPU_CONTROL_IC_ENABLE |      /* ICache enable     [12] */
 2531             CPU_CONTROL_VECRELOC;        /* Vector relocation [13] */
 2532         
 2533         cpuctrl = CPU_CONTROL_MMU_ENABLE |
 2534             CPU_CONTROL_IC_ENABLE |
 2535             CPU_CONTROL_DC_ENABLE |
 2536             CPU_CONTROL_BPRD_ENABLE;
 2537         
 2538 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 2539         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 2540 #endif
 2541         
 2542         /* Switch to big endian */
 2543 #ifdef __ARMEB__
 2544         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 2545 #endif
 2546         
 2547         /* Check if the vector page is at the high address (0xffff0000) */
 2548         if (vector_page == ARM_VECTORS_HIGH)
 2549                 cpuctrl |= CPU_CONTROL_VECRELOC;
 2550         
 2551         /* Clear out the cache */
 2552         cpu_idcache_wbinv_all();
 2553         
 2554         /* Set the control register */
 2555         ctrl = cpuctrl;
 2556         cpu_control(cpuctrlmask, cpuctrl);
 2557         
 2558         /* And again. */
 2559         cpu_idcache_wbinv_all();
 2560 #ifdef SMP
 2561         armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting  */
 2562 #endif
 2563 }
 2564 #endif  /* CPU_CORTEXA */
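
For the SMP case above, armv7_auxctrl() (defined elsewhere in the tree) is asked to set bits 0 and 6 of the ARMv7 auxiliary control register; on Cortex-A9 these are commonly documented as FW (broadcast cache/TLB maintenance) and SMP (participate in coherency), which matches the comment. Assuming armv7_auxctrl() follows the same read-modify-write convention sketched earlier, the call amounts to roughly:

    #include <stdint.h>

    #define ACTLR_FW    (1 << 0)   /* broadcast cache/TLB maintenance (Cortex-A9) */
    #define ACTLR_SMP   (1 << 6)   /* take part in SMP coherency */

    /* Hypothetical rendering of the armv7_auxctrl() call above. */
    static void
    enable_smp_coherency(void)
    {
            uint32_t actlr;

            __asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
            actlr |= ACTLR_SMP | ACTLR_FW;
            __asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));
    }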
 2565 
 2566 
 2567 #ifdef CPU_SA110
 2568 struct cpu_option sa110_options[] = {
 2569 #ifdef COMPAT_12
 2570         { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2571         { "nowritebuf",         IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
 2572 #endif  /* COMPAT_12 */
 2573         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2574         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2575         { "sa110.cache",        BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2576         { "sa110.icache",       BIC, OR,  CPU_CONTROL_IC_ENABLE },
 2577         { "sa110.dcache",       BIC, OR,  CPU_CONTROL_DC_ENABLE },
 2578         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 2579         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 2580         { "sa110.writebuf",     BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 2581         { NULL,                 IGN, IGN, 0 }
 2582 };
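
The option tables pair a boot-argument name with two actions and a bit mask. Judging from complementary entries such as "cpu.cache" (BIC, OR) and "cpu.nocache" (OR, BIC), the first action is applied to cpuctrl when the option is given as false and the second when it is given as true, with IGN meaning leave the bits alone; parse_cpu_options() (defined earlier in this file) consumes these tables. The sketch below is an illustrative re-reading of that convention under those assumptions, not a copy of the real parser, and get_bool_option() is a hypothetical stand-in for the boot-argument lookup:

    #include <stdint.h>
    #include <stddef.h>

    enum opt_action { IGN, BIC, OR };       /* mirrors the table's action codes */

    struct cpu_option_sketch {
            const char      *co_name;
            enum opt_action  co_false;      /* applied when the option is "off" */
            enum opt_action  co_true;       /* applied when the option is "on" */
            uint32_t         co_value;      /* control-register bits affected */
    };

    /*
     * Hypothetical stand-in for the boot-argument lookup: returns 1 or 0
     * if the option appears in 'args', -1 if it is absent.  Stubbed out.
     */
    static int
    get_bool_option(const char *args, const char *name)
    {
            (void)args; (void)name;
            return (-1);
    }

    static uint32_t
    apply_cpu_options(const char *args, const struct cpu_option_sketch *opts,
        uint32_t cpuctrl)
    {
            enum opt_action act;
            int set;

            for (; opts->co_name != NULL; opts++) {
                    set = get_bool_option(args, opts->co_name);
                    if (set < 0)
                            continue;       /* option not mentioned */
                    act = set ? opts->co_true : opts->co_false;
                    if (act == OR)
                            cpuctrl |= opts->co_value;
                    else if (act == BIC)
                            cpuctrl &= ~opts->co_value;
                    /* IGN: leave cpuctrl alone */
            }
            return (cpuctrl);
    }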
 2583 
 2584 void
 2585 sa110_setup(char *args)
 2587 {
 2588         int cpuctrl, cpuctrlmask;
 2589 
 2590         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2591                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2592                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2593                  | CPU_CONTROL_WBUF_ENABLE;
 2594         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2595                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2596                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2597                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 2598                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 2599                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
 2600                  | CPU_CONTROL_CPCLK;
 2601 
 2602 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 2603         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 2604 #endif
 2605 
 2606         cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
 2607 
 2608 #ifdef __ARMEB__
 2609         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 2610 #endif
 2611 
 2612         /* Clear out the cache */
 2613         cpu_idcache_wbinv_all();
 2614 
 2615         /* Set the control register */
 2616         ctrl = cpuctrl;
 2617 /*      cpu_control(cpuctrlmask, cpuctrl);*/
 2618         cpu_control(0xffffffff, cpuctrl);
 2619 
 2620         /*
 2621          * Enable clock switching.  Note that this neither reads nor writes r0;
 2622          * r0 is named only to make the asm syntactically valid.
 2623          */
 2624         __asm ("mcr 15, 0, r0, c15, c1, 2");
 2625 }
 2626 #endif  /* CPU_SA110 */
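
As the comment notes, the mcr above neither reads nor writes r0; a register is named only so the instruction encodes. With GCC-style inline assembly, a dummy input operand expresses the same thing without referring to r0 behind the compiler's back. A minimal sketch, assuming (as the original comment states) that the transferred value is ignored by the coprocessor:

    #include <stdint.h>

    /*
     * Sketch: the same CP15 operation (c15, c1, 2) with an explicit,
     * compiler-visible dummy operand instead of a bare reference to r0.
     */
    static void
    sa110_enable_clockswitching(void)
    {
            uint32_t dummy = 0;

            __asm volatile("mcr p15, 0, %0, c15, c1, 2" : : "r" (dummy));
    }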
 2627 
 2628 #if defined(CPU_SA1100) || defined(CPU_SA1110)
 2629 struct cpu_option sa11x0_options[] = {
 2630 #ifdef COMPAT_12
 2631         { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2632         { "nowritebuf",         IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
 2633 #endif  /* COMPAT_12 */
 2634         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2635         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2636         { "sa11x0.cache",       BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2637         { "sa11x0.icache",      BIC, OR,  CPU_CONTROL_IC_ENABLE },
 2638         { "sa11x0.dcache",      BIC, OR,  CPU_CONTROL_DC_ENABLE },
 2639         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 2640         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 2641         { "sa11x0.writebuf",    BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 2642         { NULL,                 IGN, IGN, 0 }
 2643 };
 2644 
 2645 void
 2646 sa11x0_setup(char *args)
 2648 {
 2649         int cpuctrl, cpuctrlmask;
 2650 
 2651         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2652                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2653                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2654                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
 2655         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2656                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2657                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2658                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 2659                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 2660                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
 2661                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
 2662 
 2663 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 2664         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 2665 #endif
 2666 
 2667 
 2668         cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
 2669 
 2670 #ifdef __ARMEB__
 2671         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 2672 #endif
 2673 
 2674         if (vector_page == ARM_VECTORS_HIGH)
 2675                 cpuctrl |= CPU_CONTROL_VECRELOC;
 2676         /* Clear out the cache */
 2677         cpu_idcache_wbinv_all();
 2678         /* Set the control register */
 2679         ctrl = cpuctrl;
 2680         cpu_control(0xffffffff, cpuctrl);
 2681 }
 2682 #endif  /* CPU_SA1100 || CPU_SA1110 */
 2683 
 2684 #if defined(CPU_FA526) || defined(CPU_FA626TE)
 2685 struct cpu_option fa526_options[] = {
 2686 #ifdef COMPAT_12
 2687         { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE |
 2688                                            CPU_CONTROL_DC_ENABLE) },
 2689         { "nowritebuf",         IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
 2690 #endif  /* COMPAT_12 */
 2691         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE |
 2692                                            CPU_CONTROL_DC_ENABLE) },
 2693         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE |
 2694                                            CPU_CONTROL_DC_ENABLE) },
 2695         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 2696         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 2697         { NULL,                 IGN, IGN, 0 }
 2698 };
 2699 
 2700 void
 2701 fa526_setup(char *args)
 2702 {
 2703         int cpuctrl, cpuctrlmask;
 2704 
 2705         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2706                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2707                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2708                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
 2709                 | CPU_CONTROL_BPRD_ENABLE;
 2710         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2711                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2712                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2713                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 2714                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 2715                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
 2716                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
 2717 
 2718 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 2719         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 2720 #endif
 2721 
 2722         cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
 2723 
 2724 #ifdef __ARMEB__
 2725         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 2726 #endif
 2727 
 2728         if (vector_page == ARM_VECTORS_HIGH)
 2729                 cpuctrl |= CPU_CONTROL_VECRELOC;
 2730 
 2731         /* Clear out the cache */
 2732         cpu_idcache_wbinv_all();
 2733 
 2734         /* Set the control register */
 2735         ctrl = cpuctrl;
 2736         cpu_control(0xffffffff, cpuctrl);
 2737 }
 2738 #endif  /* CPU_FA526 || CPU_FA626TE */
 2739 
 2740 
 2741 #if defined(CPU_IXP12X0)
 2742 struct cpu_option ixp12x0_options[] = {
 2743         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2744         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2745         { "ixp12x0.cache",      BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2746         { "ixp12x0.icache",     BIC, OR,  CPU_CONTROL_IC_ENABLE },
 2747         { "ixp12x0.dcache",     BIC, OR,  CPU_CONTROL_DC_ENABLE },
 2748         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 2749         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 2750         { "ixp12x0.writebuf",   BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 2751         { NULL,                 IGN, IGN, 0 }
 2752 };
 2753 
 2754 void
 2755 ixp12x0_setup(char *args)
 2757 {
 2758         int cpuctrl, cpuctrlmask;
 2759 
 2760 
 2761         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
 2762                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
 2763                  | CPU_CONTROL_IC_ENABLE;
 2764 
 2765         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
 2766                  | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
 2767                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
 2768                  | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
 2769                  | CPU_CONTROL_VECRELOC;
 2770 
 2771 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 2772         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 2773 #endif
 2774 
 2775         cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
 2776 
 2777 #ifdef __ARMEB__
 2778         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 2779 #endif
 2780 
 2781         if (vector_page == ARM_VECTORS_HIGH)
 2782                 cpuctrl |= CPU_CONTROL_VECRELOC;
 2783 
 2784         /* Clear out the cache */
 2785         cpu_idcache_wbinv_all();
 2786 
 2787         /* Set the control register */
 2788         ctrl = cpuctrl;
 2789         /* cpu_control(0xffffffff, cpuctrl); */
 2790         cpu_control(cpuctrlmask, cpuctrl);
 2791 }
 2792 #endif /* CPU_IXP12X0 */
 2793 
 2794 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
 2795   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
 2796   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
 2797 struct cpu_option xscale_options[] = {
 2798 #ifdef COMPAT_12
 2799         { "branchpredict",      BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 2800         { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2801 #endif  /* COMPAT_12 */
 2802         { "cpu.branchpredict",  BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 2803         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2804         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2805         { "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 2806         { "xscale.cache",       BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2807         { "xscale.icache",      BIC, OR,  CPU_CONTROL_IC_ENABLE },
 2808         { "xscale.dcache",      BIC, OR,  CPU_CONTROL_DC_ENABLE },
 2809         { NULL,                 IGN, IGN, 0 }
 2810 };
 2811 
 2812 void
 2813 xscale_setup(char *args)
 2815 {
 2816         uint32_t auxctl;
 2817         int cpuctrl, cpuctrlmask;
 2818 
 2819         /*
 2820          * The XScale write buffer is always enabled.  The only choice
 2821          * here is whether to enable or disable write coalescing.  Note
 2822          * that control-register bits 6:3 must always be set.
 2823          */
 2824 
 2825         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2826                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2827                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2828                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
 2829                  | CPU_CONTROL_BPRD_ENABLE;
 2830         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2831                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2832                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2833                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 2834                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 2835                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
 2836                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
 2837                  CPU_CONTROL_L2_ENABLE;
 2838 
 2839 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 2840         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 2841 #endif
 2842 
 2843         cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
 2844 
 2845 #ifdef __ARMEB__
 2846         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 2847 #endif
 2848 
 2849         if (vector_page == ARM_VECTORS_HIGH)
 2850                 cpuctrl |= CPU_CONTROL_VECRELOC;
 2851 #ifdef CPU_XSCALE_CORE3
 2852         cpuctrl |= CPU_CONTROL_L2_ENABLE;
 2853 #endif
 2854 
 2855         /* Clear out the cache */
 2856         cpu_idcache_wbinv_all();
 2857 
 2858         /*
 2859          * Set the control register.  Note that bits 6:3 must always
 2860          * be set to 1.
 2861          */
 2862         ctrl = cpuctrl;
 2863 /*      cpu_control(cpuctrlmask, cpuctrl);*/
 2864         cpu_control(0xffffffff, cpuctrl);
 2865 
 2866         /* Make sure write coalescing is turned on */
 2867         __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
 2868                 : "=r" (auxctl));
 2869 #ifdef XSCALE_NO_COALESCE_WRITES
 2870         auxctl |= XSCALE_AUXCTL_K;
 2871 #else
 2872         auxctl &= ~XSCALE_AUXCTL_K;
 2873 #endif
 2874 #ifdef CPU_XSCALE_CORE3
 2875         auxctl |= XSCALE_AUXCTL_LLR;
 2876         auxctl |= XSCALE_AUXCTL_MD_MASK;
 2877 #endif
 2878         __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
 2879                 : : "r" (auxctl));
 2880 }
 2881 #endif  /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 ||
 2882            CPU_XSCALE_80219 || CPU_XSCALE_81342 */
