FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/cpufunc.c

    1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
    2 
    3 /*-
    4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
    5  * arm8 support code Copyright (c) 1997 ARM Limited
    6  * arm8 support code Copyright (c) 1997 Causality Limited
    7  * arm9 support code Copyright (C) 2001 ARM Ltd
    8  * Copyright (c) 1997 Mark Brinicombe.
    9  * Copyright (c) 1997 Causality Limited
   10  * All rights reserved.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  * 3. All advertising materials mentioning features or use of this software
   21  *    must display the following acknowledgement:
   22  *      This product includes software developed by Causality Limited.
   23  * 4. The name of Causality Limited may not be used to endorse or promote
   24  *    products derived from this software without specific prior written
   25  *    permission.
   26  *
   27  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
   28  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   29  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   30  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
   31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   37  * SUCH DAMAGE.
   38  *
   39  * RiscBSD kernel project
   40  *
   41  * cpufuncs.c
   42  *
   43  * C functions for supporting CPU / MMU / TLB specific operations.
   44  *
   45  * Created      : 30/01/97
   46  */
   47 #include <sys/cdefs.h>
   48 __FBSDID("$FreeBSD$");
   49 
   50 #include <sys/param.h>
   51 #include <sys/systm.h>
   52 #include <sys/lock.h>
   53 #include <sys/mutex.h>
   54 #include <sys/bus.h>
   55 #include <machine/bus.h>
   56 #include <machine/cpu.h>
   57 #include <machine/disassem.h>
   58 
   59 #include <vm/vm.h>
   60 #include <vm/pmap.h>
   61 #include <vm/uma.h>
   62 
   63 #include <machine/cpuconf.h>
   64 #include <machine/cpufunc.h>
   65 #include <machine/bootconfig.h>
   66 
   67 #ifdef CPU_XSCALE_80200
   68 #include <arm/xscale/i80200/i80200reg.h>
   69 #include <arm/xscale/i80200/i80200var.h>
   70 #endif
   71 
   72 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
   73 #include <arm/xscale/i80321/i80321reg.h>
   74 #include <arm/xscale/i80321/i80321var.h>
   75 #endif
   76 
   77 #if defined(CPU_XSCALE_81342)
   78 #include <arm/xscale/i8134x/i81342reg.h>
   79 #endif
   80 
   81 #ifdef CPU_XSCALE_IXP425
   82 #include <arm/xscale/ixp425/ixp425reg.h>
   83 #include <arm/xscale/ixp425/ixp425var.h>
   84 #endif
   85 
   86 /* PRIMARY CACHE VARIABLES */
   87 int     arm_picache_size;
   88 int     arm_picache_line_size;
   89 int     arm_picache_ways;
   90 
   91 int     arm_pdcache_size;       /* and unified */
   92 int     arm_pdcache_line_size;
   93 int     arm_pdcache_ways;
   94 
   95 int     arm_pcache_type;
   96 int     arm_pcache_unified;
   97 
   98 int     arm_dcache_align;
   99 int     arm_dcache_align_mask;
  100 
  101 /* 1 == use cpu_sleep(), 0 == don't */
  102 int cpu_do_powersave;
  103 int ctrl;
  104 
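       /*
        * Each supported CPU family below provides a struct cpu_functions
        * table.  set_cpufuncs(), later in this file, identifies the running
        * CPU and copies the matching table into the global 'cpufuncs',
        * through which the rest of the kernel performs the CPU / MMU / TLB
        * specific operations listed here.
        */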
  105 #ifdef CPU_ARM7TDMI
  106 struct cpu_functions arm7tdmi_cpufuncs = {
  107         /* CPU functions */
  108         
  109         cpufunc_id,                     /* id                   */
  110         cpufunc_nullop,                 /* cpwait               */
  111 
  112         /* MMU functions */
  113 
  114         cpufunc_control,                /* control              */
  115         cpufunc_domains,                /* domain               */
  116         arm7tdmi_setttb,                /* setttb               */
  117         cpufunc_faultstatus,            /* faultstatus          */
  118         cpufunc_faultaddress,           /* faultaddress         */
  119 
  120         /* TLB functions */
  121 
  122         arm7tdmi_tlb_flushID,           /* tlb_flushID          */
  123         arm7tdmi_tlb_flushID_SE,        /* tlb_flushID_SE       */
  124         arm7tdmi_tlb_flushID,           /* tlb_flushI           */
  125         arm7tdmi_tlb_flushID_SE,        /* tlb_flushI_SE        */
  126         arm7tdmi_tlb_flushID,           /* tlb_flushD           */
  127         arm7tdmi_tlb_flushID_SE,        /* tlb_flushD_SE        */
  128 
  129         /* Cache operations */
  130 
  131         cpufunc_nullop,                 /* icache_sync_all      */
  132         (void *)cpufunc_nullop,         /* icache_sync_range    */
  133 
  134         arm7tdmi_cache_flushID,         /* dcache_wbinv_all     */
  135         (void *)arm7tdmi_cache_flushID, /* dcache_wbinv_range   */
  136         (void *)arm7tdmi_cache_flushID, /* dcache_inv_range     */
  137         (void *)cpufunc_nullop,         /* dcache_wb_range      */
  138 
  139         arm7tdmi_cache_flushID,         /* idcache_wbinv_all    */
  140         (void *)arm7tdmi_cache_flushID, /* idcache_wbinv_range  */
  141         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  142         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  143         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  144         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  145 
  146         /* Other functions */
  147 
  148         cpufunc_nullop,                 /* flush_prefetchbuf    */
  149         cpufunc_nullop,                 /* drain_writebuf       */
  150         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  151         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  152 
  153         (void *)cpufunc_nullop,         /* sleep                */
  154 
  155         /* Soft functions */
  156 
  157         late_abort_fixup,               /* dataabt_fixup        */
  158         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  159 
  160         arm7tdmi_context_switch,        /* context_switch       */
  161 
  162         arm7tdmi_setup                  /* cpu setup            */
  163 
  164 };
  165 #endif  /* CPU_ARM7TDMI */
  166 
  167 #ifdef CPU_ARM8
  168 struct cpu_functions arm8_cpufuncs = {
  169         /* CPU functions */
  170         
  171         cpufunc_id,                     /* id                   */
  172         cpufunc_nullop,                 /* cpwait               */
  173 
  174         /* MMU functions */
  175 
  176         cpufunc_control,                /* control              */
  177         cpufunc_domains,                /* domain               */
  178         arm8_setttb,                    /* setttb               */
  179         cpufunc_faultstatus,            /* faultstatus          */
  180         cpufunc_faultaddress,           /* faultaddress         */
  181 
  182         /* TLB functions */
  183 
  184         arm8_tlb_flushID,               /* tlb_flushID          */
  185         arm8_tlb_flushID_SE,            /* tlb_flushID_SE       */
  186         arm8_tlb_flushID,               /* tlb_flushI           */
  187         arm8_tlb_flushID_SE,            /* tlb_flushI_SE        */
  188         arm8_tlb_flushID,               /* tlb_flushD           */
  189         arm8_tlb_flushID_SE,            /* tlb_flushD_SE        */
  190 
  191         /* Cache operations */
  192 
  193         cpufunc_nullop,                 /* icache_sync_all      */
  194         (void *)cpufunc_nullop,         /* icache_sync_range    */
  195 
  196         arm8_cache_purgeID,             /* dcache_wbinv_all     */
  197         (void *)arm8_cache_purgeID,     /* dcache_wbinv_range   */
  198 /*XXX*/ (void *)arm8_cache_purgeID,     /* dcache_inv_range     */
  199         (void *)arm8_cache_cleanID,     /* dcache_wb_range      */
  200 
  201         arm8_cache_purgeID,             /* idcache_wbinv_all    */
  202         (void *)arm8_cache_purgeID,     /* idcache_wbinv_range  */
  203         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  204         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  205         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  206         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  207 
  208         /* Other functions */
  209 
  210         cpufunc_nullop,                 /* flush_prefetchbuf    */
  211         cpufunc_nullop,                 /* drain_writebuf       */
  212         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  213         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  214 
  215         (void *)cpufunc_nullop,         /* sleep                */
  216 
  217         /* Soft functions */
  218 
  219         cpufunc_null_fixup,             /* dataabt_fixup        */
  220         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  221 
  222         arm8_context_switch,            /* context_switch       */
  223 
  224         arm8_setup                      /* cpu setup            */
  225 };
  226 #endif  /* CPU_ARM8 */
  227 
  228 #ifdef CPU_ARM9
  229 struct cpu_functions arm9_cpufuncs = {
  230         /* CPU functions */
  231 
  232         cpufunc_id,                     /* id                   */
  233         cpufunc_nullop,                 /* cpwait               */
  234 
  235         /* MMU functions */
  236 
  237         cpufunc_control,                /* control              */
   238         cpufunc_domains,                /* domain               */
   239         arm9_setttb,                    /* setttb               */
   240         cpufunc_faultstatus,            /* faultstatus          */
   241         cpufunc_faultaddress,           /* faultaddress         */
  242 
  243         /* TLB functions */
  244 
  245         armv4_tlb_flushID,              /* tlb_flushID          */
  246         arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
  247         armv4_tlb_flushI,               /* tlb_flushI           */
  248         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  249         armv4_tlb_flushD,               /* tlb_flushD           */
  250         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  251 
  252         /* Cache operations */
  253 
  254         arm9_icache_sync_all,           /* icache_sync_all      */
  255         arm9_icache_sync_range,         /* icache_sync_range    */
  256 
  257         arm9_dcache_wbinv_all,          /* dcache_wbinv_all     */
  258         arm9_dcache_wbinv_range,        /* dcache_wbinv_range   */
  259         arm9_dcache_inv_range,          /* dcache_inv_range     */
  260         arm9_dcache_wb_range,           /* dcache_wb_range      */
  261 
  262         arm9_idcache_wbinv_all,         /* idcache_wbinv_all    */
  263         arm9_idcache_wbinv_range,       /* idcache_wbinv_range  */
  264         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  265         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  266         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  267         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  268 
  269         /* Other functions */
  270 
  271         cpufunc_nullop,                 /* flush_prefetchbuf    */
  272         armv4_drain_writebuf,           /* drain_writebuf       */
  273         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  274         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  275 
  276         (void *)cpufunc_nullop,         /* sleep                */
  277 
  278         /* Soft functions */
  279 
  280         cpufunc_null_fixup,             /* dataabt_fixup        */
  281         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  282 
  283         arm9_context_switch,            /* context_switch       */
  284 
  285         arm9_setup                      /* cpu setup            */
  286 
  287 };
  288 #endif /* CPU_ARM9 */
  289 
  290 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
  291 struct cpu_functions armv5_ec_cpufuncs = {
  292         /* CPU functions */
  293 
  294         cpufunc_id,                     /* id                   */
  295         cpufunc_nullop,                 /* cpwait               */
  296 
  297         /* MMU functions */
  298 
  299         cpufunc_control,                /* control              */
   300         cpufunc_domains,                /* domain               */
   301         armv5_ec_setttb,                /* setttb               */
   302         cpufunc_faultstatus,            /* faultstatus          */
   303         cpufunc_faultaddress,           /* faultaddress         */
  304 
  305         /* TLB functions */
  306 
  307         armv4_tlb_flushID,              /* tlb_flushID          */
  308         arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
  309         armv4_tlb_flushI,               /* tlb_flushI           */
  310         arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
  311         armv4_tlb_flushD,               /* tlb_flushD           */
  312         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  313 
  314         /* Cache operations */
  315 
  316         armv5_ec_icache_sync_all,       /* icache_sync_all      */
  317         armv5_ec_icache_sync_range,     /* icache_sync_range    */
  318 
  319         armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
  320         armv5_ec_dcache_wbinv_range,    /* dcache_wbinv_range   */
  321         armv5_ec_dcache_inv_range,      /* dcache_inv_range     */
  322         armv5_ec_dcache_wb_range,       /* dcache_wb_range      */
  323 
  324         armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
  325         armv5_ec_idcache_wbinv_range,   /* idcache_wbinv_range  */
  326 
  327         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  328         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  329         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  330         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  331 
  332         /* Other functions */
  333 
  334         cpufunc_nullop,                 /* flush_prefetchbuf    */
  335         armv4_drain_writebuf,           /* drain_writebuf       */
  336         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  337         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  338 
  339         (void *)cpufunc_nullop,         /* sleep                */
  340 
  341         /* Soft functions */
  342 
  343         cpufunc_null_fixup,             /* dataabt_fixup        */
  344         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  345 
  346         arm10_context_switch,           /* context_switch       */
  347 
  348         arm10_setup                     /* cpu setup            */
  349 
  350 };
  351 
  352 struct cpu_functions sheeva_cpufuncs = {
  353         /* CPU functions */
  354 
  355         cpufunc_id,                     /* id                   */
  356         cpufunc_nullop,                 /* cpwait               */
  357 
  358         /* MMU functions */
  359 
  360         cpufunc_control,                /* control              */
   361         cpufunc_domains,                /* domain               */
   362         sheeva_setttb,                  /* setttb               */
   363         cpufunc_faultstatus,            /* faultstatus          */
   364         cpufunc_faultaddress,           /* faultaddress         */
  365 
  366         /* TLB functions */
  367 
  368         armv4_tlb_flushID,              /* tlb_flushID          */
  369         arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
  370         armv4_tlb_flushI,               /* tlb_flushI           */
  371         arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
  372         armv4_tlb_flushD,               /* tlb_flushD           */
  373         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  374 
  375         /* Cache operations */
  376 
  377         armv5_ec_icache_sync_all,       /* icache_sync_all      */
  378         armv5_ec_icache_sync_range,     /* icache_sync_range    */
  379 
  380         armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
  381         sheeva_dcache_wbinv_range,      /* dcache_wbinv_range   */
  382         sheeva_dcache_inv_range,        /* dcache_inv_range     */
  383         sheeva_dcache_wb_range,         /* dcache_wb_range      */
  384 
  385         armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
   386         sheeva_idcache_wbinv_range,     /* idcache_wbinv_range  */
  387 
  388         sheeva_l2cache_wbinv_all,       /* l2cache_wbinv_all    */
  389         sheeva_l2cache_wbinv_range,     /* l2cache_wbinv_range  */
  390         sheeva_l2cache_inv_range,       /* l2cache_inv_range    */
  391         sheeva_l2cache_wb_range,        /* l2cache_wb_range     */
  392 
  393         /* Other functions */
  394 
  395         cpufunc_nullop,                 /* flush_prefetchbuf    */
  396         armv4_drain_writebuf,           /* drain_writebuf       */
  397         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  398         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  399 
  400         sheeva_cpu_sleep,               /* sleep                */
  401 
  402         /* Soft functions */
  403 
  404         cpufunc_null_fixup,             /* dataabt_fixup        */
  405         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  406 
  407         arm10_context_switch,           /* context_switch       */
  408 
  409         arm10_setup                     /* cpu setup            */
  410 };
  411 #endif /* CPU_ARM9E || CPU_ARM10 */
  412 
  413 #ifdef CPU_ARM10
  414 struct cpu_functions arm10_cpufuncs = {
  415         /* CPU functions */
  416 
  417         cpufunc_id,                     /* id                   */
  418         cpufunc_nullop,                 /* cpwait               */
  419 
  420         /* MMU functions */
  421 
  422         cpufunc_control,                /* control              */
   423         cpufunc_domains,                /* domain               */
   424         arm10_setttb,                   /* setttb               */
   425         cpufunc_faultstatus,            /* faultstatus          */
   426         cpufunc_faultaddress,           /* faultaddress         */
  427 
  428         /* TLB functions */
  429 
  430         armv4_tlb_flushID,              /* tlb_flushID          */
  431         arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
  432         armv4_tlb_flushI,               /* tlb_flushI           */
  433         arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
  434         armv4_tlb_flushD,               /* tlb_flushD           */
  435         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  436 
  437         /* Cache operations */
  438 
  439         arm10_icache_sync_all,          /* icache_sync_all      */
  440         arm10_icache_sync_range,        /* icache_sync_range    */
  441 
  442         arm10_dcache_wbinv_all,         /* dcache_wbinv_all     */
  443         arm10_dcache_wbinv_range,       /* dcache_wbinv_range   */
  444         arm10_dcache_inv_range,         /* dcache_inv_range     */
  445         arm10_dcache_wb_range,          /* dcache_wb_range      */
  446 
  447         arm10_idcache_wbinv_all,        /* idcache_wbinv_all    */
  448         arm10_idcache_wbinv_range,      /* idcache_wbinv_range  */
  449         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  450         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  451         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  452         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  453 
  454         /* Other functions */
  455 
  456         cpufunc_nullop,                 /* flush_prefetchbuf    */
  457         armv4_drain_writebuf,           /* drain_writebuf       */
  458         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  459         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  460 
  461         (void *)cpufunc_nullop,         /* sleep                */
  462 
  463         /* Soft functions */
  464 
  465         cpufunc_null_fixup,             /* dataabt_fixup        */
  466         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  467 
  468         arm10_context_switch,           /* context_switch       */
  469 
  470         arm10_setup                     /* cpu setup            */
  471 
  472 };
  473 #endif /* CPU_ARM10 */
  474 
  475 #ifdef CPU_SA110
  476 struct cpu_functions sa110_cpufuncs = {
  477         /* CPU functions */
  478         
  479         cpufunc_id,                     /* id                   */
  480         cpufunc_nullop,                 /* cpwait               */
  481 
  482         /* MMU functions */
  483 
  484         cpufunc_control,                /* control              */
  485         cpufunc_domains,                /* domain               */
  486         sa1_setttb,                     /* setttb               */
  487         cpufunc_faultstatus,            /* faultstatus          */
  488         cpufunc_faultaddress,           /* faultaddress         */
  489 
  490         /* TLB functions */
  491 
  492         armv4_tlb_flushID,              /* tlb_flushID          */
  493         sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
  494         armv4_tlb_flushI,               /* tlb_flushI           */
  495         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  496         armv4_tlb_flushD,               /* tlb_flushD           */
  497         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  498 
  499         /* Cache operations */
  500 
  501         sa1_cache_syncI,                /* icache_sync_all      */
  502         sa1_cache_syncI_rng,            /* icache_sync_range    */
  503 
  504         sa1_cache_purgeD,               /* dcache_wbinv_all     */
  505         sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
  506 /*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
  507         sa1_cache_cleanD_rng,           /* dcache_wb_range      */
  508 
  509         sa1_cache_purgeID,              /* idcache_wbinv_all    */
  510         sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */
  511         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  512         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  513         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  514         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  515 
  516         /* Other functions */
  517 
  518         cpufunc_nullop,                 /* flush_prefetchbuf    */
  519         armv4_drain_writebuf,           /* drain_writebuf       */
  520         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  521         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  522 
  523         (void *)cpufunc_nullop,         /* sleep                */
  524 
  525         /* Soft functions */
  526 
  527         cpufunc_null_fixup,             /* dataabt_fixup        */
  528         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  529 
  530         sa110_context_switch,           /* context_switch       */
  531 
  532         sa110_setup                     /* cpu setup            */
  533 };
  534 #endif  /* CPU_SA110 */
  535 
  536 #if defined(CPU_SA1100) || defined(CPU_SA1110)
  537 struct cpu_functions sa11x0_cpufuncs = {
  538         /* CPU functions */
  539         
  540         cpufunc_id,                     /* id                   */
  541         cpufunc_nullop,                 /* cpwait               */
  542 
  543         /* MMU functions */
  544 
  545         cpufunc_control,                /* control              */
  546         cpufunc_domains,                /* domain               */
  547         sa1_setttb,                     /* setttb               */
  548         cpufunc_faultstatus,            /* faultstatus          */
  549         cpufunc_faultaddress,           /* faultaddress         */
  550 
  551         /* TLB functions */
  552 
  553         armv4_tlb_flushID,              /* tlb_flushID          */
  554         sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
  555         armv4_tlb_flushI,               /* tlb_flushI           */
  556         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  557         armv4_tlb_flushD,               /* tlb_flushD           */
  558         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  559 
  560         /* Cache operations */
  561 
  562         sa1_cache_syncI,                /* icache_sync_all      */
  563         sa1_cache_syncI_rng,            /* icache_sync_range    */
  564 
  565         sa1_cache_purgeD,               /* dcache_wbinv_all     */
  566         sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
  567 /*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
  568         sa1_cache_cleanD_rng,           /* dcache_wb_range      */
  569 
  570         sa1_cache_purgeID,              /* idcache_wbinv_all    */
  571         sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */
  572         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  573         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  574         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  575         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  576 
  577         /* Other functions */
  578 
  579         sa11x0_drain_readbuf,           /* flush_prefetchbuf    */
  580         armv4_drain_writebuf,           /* drain_writebuf       */
  581         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  582         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  583 
  584         sa11x0_cpu_sleep,               /* sleep                */
  585 
  586         /* Soft functions */
  587 
  588         cpufunc_null_fixup,             /* dataabt_fixup        */
  589         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  590 
  591         sa11x0_context_switch,          /* context_switch       */
  592 
  593         sa11x0_setup                    /* cpu setup            */
  594 };
  595 #endif  /* CPU_SA1100 || CPU_SA1110 */
  596 
  597 #ifdef CPU_IXP12X0
  598 struct cpu_functions ixp12x0_cpufuncs = {
  599         /* CPU functions */
  600         
  601         cpufunc_id,                     /* id                   */
  602         cpufunc_nullop,                 /* cpwait               */
  603 
  604         /* MMU functions */
  605 
  606         cpufunc_control,                /* control              */
  607         cpufunc_domains,                /* domain               */
  608         sa1_setttb,                     /* setttb               */
  609         cpufunc_faultstatus,            /* faultstatus          */
  610         cpufunc_faultaddress,           /* faultaddress         */
  611 
  612         /* TLB functions */
  613 
  614         armv4_tlb_flushID,              /* tlb_flushID          */
  615         sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
  616         armv4_tlb_flushI,               /* tlb_flushI           */
  617         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  618         armv4_tlb_flushD,               /* tlb_flushD           */
  619         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  620 
  621         /* Cache operations */
  622 
  623         sa1_cache_syncI,                /* icache_sync_all      */
  624         sa1_cache_syncI_rng,            /* icache_sync_range    */
  625 
  626         sa1_cache_purgeD,               /* dcache_wbinv_all     */
  627         sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
  628 /*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
  629         sa1_cache_cleanD_rng,           /* dcache_wb_range      */
  630 
  631         sa1_cache_purgeID,              /* idcache_wbinv_all    */
  632         sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */
  633         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  634         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  635         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  636         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  637 
  638         /* Other functions */
  639 
   640         ixp12x0_drain_readbuf,          /* flush_prefetchbuf    */
  641         armv4_drain_writebuf,           /* drain_writebuf       */
  642         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  643         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  644 
  645         (void *)cpufunc_nullop,         /* sleep                */
  646 
  647         /* Soft functions */
  648 
  649         cpufunc_null_fixup,             /* dataabt_fixup        */
  650         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  651 
  652         ixp12x0_context_switch,         /* context_switch       */
  653 
  654         ixp12x0_setup                   /* cpu setup            */
  655 };
  656 #endif  /* CPU_IXP12X0 */
  657 
  658 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
  659   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  660   defined(CPU_XSCALE_80219)
  661 
  662 struct cpu_functions xscale_cpufuncs = {
  663         /* CPU functions */
  664         
  665         cpufunc_id,                     /* id                   */
  666         xscale_cpwait,                  /* cpwait               */
  667 
  668         /* MMU functions */
  669 
  670         xscale_control,                 /* control              */
  671         cpufunc_domains,                /* domain               */
  672         xscale_setttb,                  /* setttb               */
  673         cpufunc_faultstatus,            /* faultstatus          */
  674         cpufunc_faultaddress,           /* faultaddress         */
  675 
  676         /* TLB functions */
  677 
  678         armv4_tlb_flushID,              /* tlb_flushID          */
  679         xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
  680         armv4_tlb_flushI,               /* tlb_flushI           */
  681         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  682         armv4_tlb_flushD,               /* tlb_flushD           */
  683         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  684 
  685         /* Cache operations */
  686 
  687         xscale_cache_syncI,             /* icache_sync_all      */
  688         xscale_cache_syncI_rng,         /* icache_sync_range    */
  689 
  690         xscale_cache_purgeD,            /* dcache_wbinv_all     */
  691         xscale_cache_purgeD_rng,        /* dcache_wbinv_range   */
  692         xscale_cache_flushD_rng,        /* dcache_inv_range     */
  693         xscale_cache_cleanD_rng,        /* dcache_wb_range      */
  694 
  695         xscale_cache_purgeID,           /* idcache_wbinv_all    */
  696         xscale_cache_purgeID_rng,       /* idcache_wbinv_range  */
  697         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  698         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  699         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  700         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  701 
  702         /* Other functions */
  703 
  704         cpufunc_nullop,                 /* flush_prefetchbuf    */
  705         armv4_drain_writebuf,           /* drain_writebuf       */
  706         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  707         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  708 
  709         xscale_cpu_sleep,               /* sleep                */
  710 
  711         /* Soft functions */
  712 
  713         cpufunc_null_fixup,             /* dataabt_fixup        */
  714         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  715 
  716         xscale_context_switch,          /* context_switch       */
  717 
  718         xscale_setup                    /* cpu setup            */
  719 };
  720 #endif
   721 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 ||
   722    CPU_XSCALE_80219 */
  723 
  724 #ifdef CPU_XSCALE_81342
  725 struct cpu_functions xscalec3_cpufuncs = {
  726         /* CPU functions */
  727         
  728         cpufunc_id,                     /* id                   */
  729         xscale_cpwait,                  /* cpwait               */
  730 
  731         /* MMU functions */
  732 
  733         xscale_control,                 /* control              */
  734         cpufunc_domains,                /* domain               */
  735         xscalec3_setttb,                /* setttb               */
  736         cpufunc_faultstatus,            /* faultstatus          */
  737         cpufunc_faultaddress,           /* faultaddress         */
  738 
  739         /* TLB functions */
  740 
  741         armv4_tlb_flushID,              /* tlb_flushID          */
  742         xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
  743         armv4_tlb_flushI,               /* tlb_flushI           */
  744         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  745         armv4_tlb_flushD,               /* tlb_flushD           */
  746         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  747 
  748         /* Cache operations */
  749 
  750         xscalec3_cache_syncI,           /* icache_sync_all      */
  751         xscalec3_cache_syncI_rng,       /* icache_sync_range    */
  752 
  753         xscalec3_cache_purgeD,          /* dcache_wbinv_all     */
  754         xscalec3_cache_purgeD_rng,      /* dcache_wbinv_range   */
  755         xscale_cache_flushD_rng,        /* dcache_inv_range     */
  756         xscalec3_cache_cleanD_rng,      /* dcache_wb_range      */
  757 
  758         xscalec3_cache_purgeID,         /* idcache_wbinv_all    */
  759         xscalec3_cache_purgeID_rng,     /* idcache_wbinv_range  */
  760         xscalec3_l2cache_purge,         /* l2cache_wbinv_all    */
  761         xscalec3_l2cache_purge_rng,     /* l2cache_wbinv_range  */
  762         xscalec3_l2cache_flush_rng,     /* l2cache_inv_range    */
  763         xscalec3_l2cache_clean_rng,     /* l2cache_wb_range     */
  764 
  765         /* Other functions */
  766 
  767         cpufunc_nullop,                 /* flush_prefetchbuf    */
  768         armv4_drain_writebuf,           /* drain_writebuf       */
  769         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  770         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  771 
  772         xscale_cpu_sleep,               /* sleep                */
  773 
  774         /* Soft functions */
  775 
  776         cpufunc_null_fixup,             /* dataabt_fixup        */
  777         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  778 
  779         xscalec3_context_switch,        /* context_switch       */
  780 
  781         xscale_setup                    /* cpu setup            */
  782 };
  783 #endif /* CPU_XSCALE_81342 */
  784 
  785 
  786 #if defined(CPU_FA526) || defined(CPU_FA626TE)
  787 struct cpu_functions fa526_cpufuncs = {
  788         /* CPU functions */
  789 
  790         cpufunc_id,                     /* id                   */
  791         cpufunc_nullop,                 /* cpwait               */
  792 
  793         /* MMU functions */
  794 
  795         cpufunc_control,                /* control              */
  796         cpufunc_domains,                /* domain               */
  797         fa526_setttb,                   /* setttb               */
  798         cpufunc_faultstatus,            /* faultstatus          */
  799         cpufunc_faultaddress,           /* faultaddress         */
  800 
  801         /* TLB functions */
  802 
  803         armv4_tlb_flushID,              /* tlb_flushID          */
  804         fa526_tlb_flushID_SE,           /* tlb_flushID_SE       */
  805         armv4_tlb_flushI,               /* tlb_flushI           */
  806         fa526_tlb_flushI_SE,            /* tlb_flushI_SE        */
  807         armv4_tlb_flushD,               /* tlb_flushD           */
  808         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  809 
  810         /* Cache operations */
  811 
  812         fa526_icache_sync_all,          /* icache_sync_all      */
  813         fa526_icache_sync_range,        /* icache_sync_range    */
  814 
  815         fa526_dcache_wbinv_all,         /* dcache_wbinv_all     */
  816         fa526_dcache_wbinv_range,       /* dcache_wbinv_range   */
  817         fa526_dcache_inv_range,         /* dcache_inv_range     */
  818         fa526_dcache_wb_range,          /* dcache_wb_range      */
  819 
  820         fa526_idcache_wbinv_all,        /* idcache_wbinv_all    */
  821         fa526_idcache_wbinv_range,      /* idcache_wbinv_range  */
  822         cpufunc_nullop,                 /* l2cache_wbinv_all    */
  823         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
  824         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
  825         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
  826 
  827         /* Other functions */
  828 
  829         fa526_flush_prefetchbuf,        /* flush_prefetchbuf    */
  830         armv4_drain_writebuf,           /* drain_writebuf       */
  831         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  832         fa526_flush_brnchtgt_E,         /* flush_brnchtgt_E     */
  833 
  834         fa526_cpu_sleep,                /* sleep                */
  835 
  836         /* Soft functions */
  837 
  838         cpufunc_null_fixup,             /* dataabt_fixup        */
  839         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  840 
  841         fa526_context_switch,           /* context_switch       */
  842 
  843         fa526_setup                     /* cpu setup            */
  844 };
  845 #endif  /* CPU_FA526 || CPU_FA626TE */
  846 
  847 
  848 /*
  849  * Global constants also used by locore.s
  850  */
  851 
  852 struct cpu_functions cpufuncs;
  853 u_int cputype;
  854 u_int cpu_reset_needs_v4_MMU_disable;   /* flag used in locore.s */
  855 
  856 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) ||  \
  857   defined (CPU_ARM9E) || defined (CPU_ARM10) ||                         \
  858   defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||             \
  859   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
  860   defined(CPU_FA526) || defined(CPU_FA626TE) ||                         \
  861   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
  862 
  863 static void get_cachetype_cp15(void);
  864 
  865 /* Additional cache information local to this file.  Log2 of some of the
  866    above numbers.  */
  867 static int      arm_dcache_l2_nsets;
  868 static int      arm_dcache_l2_assoc;
  869 static int      arm_dcache_l2_linesize;
  870 
  871 static void
   872 get_cachetype_cp15(void)
  873 {
  874         u_int ctype, isize, dsize;
  875         u_int multiplier;
  876 
  877         __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
  878                 : "=r" (ctype));
  879 
  880         /*
  881          * ...and thus spake the ARM ARM:
  882          *
  883          * If an <opcode2> value corresponding to an unimplemented or
  884          * reserved ID register is encountered, the System Control
  885          * processor returns the value of the main ID register.
  886          */
  887         if (ctype == cpufunc_id())
  888                 goto out;
  889 
  890         if ((ctype & CPU_CT_S) == 0)
  891                 arm_pcache_unified = 1;
  892 
  893         /*
  894          * If you want to know how this code works, go read the ARM ARM.
  895          */
  896 
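               /*
                * The I and D size fields are decoded below as: line length
                * = 1 << (LEN + 3) bytes, ways = multiplier << (ASSOC - 1)
                * (or 1 way when ASSOC is 0), and total size = multiplier <<
                * (SIZE + 8) bytes, where multiplier is 3 if the M bit is set
                * and 2 otherwise.  ASSOC == 0 with the M bit set means the
                * cache is not present.
                */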
  897         arm_pcache_type = CPU_CT_CTYPE(ctype);
  898 
  899         if (arm_pcache_unified == 0) {
  900                 isize = CPU_CT_ISIZE(ctype);
  901                 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
  902                 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
  903                 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
  904                         if (isize & CPU_CT_xSIZE_M)
  905                                 arm_picache_line_size = 0; /* not present */
  906                         else
  907                                 arm_picache_ways = 1;
  908                 } else {
  909                         arm_picache_ways = multiplier <<
  910                             (CPU_CT_xSIZE_ASSOC(isize) - 1);
  911                 }
  912                 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
  913         }
  914 
  915         dsize = CPU_CT_DSIZE(ctype);
  916         multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
  917         arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
  918         if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
  919                 if (dsize & CPU_CT_xSIZE_M)
  920                         arm_pdcache_line_size = 0; /* not present */
  921                 else
  922                         arm_pdcache_ways = 1;
  923         } else {
  924                 arm_pdcache_ways = multiplier <<
  925                     (CPU_CT_xSIZE_ASSOC(dsize) - 1);
  926         }
  927         arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
  928 
  929         arm_dcache_align = arm_pdcache_line_size;
  930 
  931         arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
  932         arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
  933         arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
  934             CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
  935 
  936  out:
  937         arm_dcache_align_mask = arm_dcache_align - 1;
  938 }
   939 #endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || FA526 || FA626TE || XSCALE */
  940 
  941 #if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
  942     defined(CPU_IXP12X0)
  943 /* Cache information for CPUs without cache type registers. */
  944 struct cachetab {
  945         u_int32_t ct_cpuid;
  946         int     ct_pcache_type;
  947         int     ct_pcache_unified;
  948         int     ct_pdcache_size;
  949         int     ct_pdcache_line_size;
  950         int     ct_pdcache_ways;
  951         int     ct_picache_size;
  952         int     ct_picache_line_size;
  953         int     ct_picache_ways;
  954 };
  955 
  956 struct cachetab cachetab[] = {
  957     /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
  958     /* XXX is this type right for SA-1? */
  959     { CPU_ID_SA110,     CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
  960     { CPU_ID_SA1100,    CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
  961     { CPU_ID_SA1110,    CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
  962     { CPU_ID_IXP1200,   CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
  963     { 0, 0, 0, 0, 0, 0, 0, 0}
  964 };
  965 
  966 static void get_cachetype_table(void);
  967 
  968 static void
   969 get_cachetype_table(void)
  970 {
  971         int i;
  972         u_int32_t cpuid = cpufunc_id();
  973 
  974         for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
  975                 if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
  976                         arm_pcache_type = cachetab[i].ct_pcache_type;
  977                         arm_pcache_unified = cachetab[i].ct_pcache_unified;
  978                         arm_pdcache_size = cachetab[i].ct_pdcache_size;
  979                         arm_pdcache_line_size =
  980                             cachetab[i].ct_pdcache_line_size;
  981                         arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
  982                         arm_picache_size = cachetab[i].ct_picache_size;
  983                         arm_picache_line_size =
  984                             cachetab[i].ct_picache_line_size;
  985                         arm_picache_ways = cachetab[i].ct_picache_ways;
  986                 }
  987         }
  988         arm_dcache_align = arm_pdcache_line_size;
  989 
  990         arm_dcache_align_mask = arm_dcache_align - 1;
  991 }
  992 
   993 #endif /* SA110 || SA1100 || SA1110 || IXP12X0 */
  994 
  995 /*
  996  * Cannot panic here as we may not have a console yet ...
  997  */
  998 
  999 int
  1000 set_cpufuncs(void)
 1001 {
 1002         cputype = cpufunc_id();
 1003         cputype &= CPU_ID_CPU_MASK;
 1004 
 1005         /*
 1006          * NOTE: cpu_do_powersave defaults to off.  If we encounter a
 1007          * CPU type where we want to use it by default, then we set it.
 1008          */
 1009 
 1010 #ifdef CPU_ARM7TDMI
 1011         if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
 1012             CPU_ID_IS7(cputype) &&
 1013             (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
 1014                 cpufuncs = arm7tdmi_cpufuncs;
 1015                 cpu_reset_needs_v4_MMU_disable = 0;
 1016                 get_cachetype_cp15();
 1017                 pmap_pte_init_generic();
 1018                 goto out;
 1019         }
  1020 #endif  /* CPU_ARM7TDMI */
 1021 #ifdef CPU_ARM8
 1022         if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
 1023             (cputype & 0x0000f000) == 0x00008000) {
 1024                 cpufuncs = arm8_cpufuncs;
 1025                 cpu_reset_needs_v4_MMU_disable = 0;     /* XXX correct? */
 1026                 get_cachetype_cp15();
 1027                 pmap_pte_init_arm8();
 1028                 goto out;
 1029         }
 1030 #endif  /* CPU_ARM8 */
 1031 #ifdef CPU_ARM9
 1032         if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
 1033              (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
 1034             (cputype & 0x0000f000) == 0x00009000) {
 1035                 cpufuncs = arm9_cpufuncs;
 1036                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
 1037                 get_cachetype_cp15();
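                       /*
                        * Precompute the parameters the arm9 D-cache routines
                        * use to walk the cache by set/way: sets_inc/sets_max
                        * step through the sets of one way in line-size units,
                        * while index_inc/index_max step the way number held
                        * in the uppermost bits of the set/way operand.
                        */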
 1038                 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
 1039                 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
 1040                     arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
 1041                 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
 1042                 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
 1043 #ifdef ARM9_CACHE_WRITE_THROUGH
 1044                 pmap_pte_init_arm9();
 1045 #else
 1046                 pmap_pte_init_generic();
 1047 #endif
 1048                 goto out;
 1049         }
 1050 #endif /* CPU_ARM9 */
 1051 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
 1052         if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS ||
 1053             cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
 1054             cputype == CPU_ID_MV88FR571_41) {
 1055                 if (cputype == CPU_ID_MV88FR131 ||
 1056                     cputype == CPU_ID_MV88FR571_VD ||
 1057                     cputype == CPU_ID_MV88FR571_41) {
 1058 
 1059                         cpufuncs = sheeva_cpufuncs;
 1060                         /*
 1061                          * Workaround for Marvell MV78100 CPU: Cache prefetch
 1062                          * mechanism may affect the cache coherency validity,
 1063                          * so it needs to be disabled.
 1064                          *
 1065                          * Refer to errata document MV-S501058-00C.pdf (p. 3.1
 1066                          * L2 Prefetching Mechanism) for details.
 1067                          */
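                               /*
                                * The two calls below differ only in
                                * FC_L2_PREF_DIS, which disables the L2
                                * prefetch mechanism described above on
                                * the affected parts.
                                */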
 1068                         if (cputype == CPU_ID_MV88FR571_VD ||
 1069                             cputype == CPU_ID_MV88FR571_41) {
 1070                                 sheeva_control_ext(0xffffffff,
 1071                                     FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN |
 1072                                     FC_BRANCH_TARG_BUF_DIS | FC_L2CACHE_EN |
 1073                                     FC_L2_PREF_DIS);
 1074                         } else {
 1075                                 sheeva_control_ext(0xffffffff,
 1076                                     FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN |
 1077                                     FC_BRANCH_TARG_BUF_DIS | FC_L2CACHE_EN);
 1078                         }
 1079 
 1080                         /* Use powersave on this CPU. */
 1081                         cpu_do_powersave = 1;
 1082                 } else
 1083                         cpufuncs = armv5_ec_cpufuncs;
 1084 
 1085                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
 1086                 get_cachetype_cp15();
 1087                 pmap_pte_init_generic();
 1088                 goto out;
 1089         }
 1090 #endif /* CPU_ARM9E || CPU_ARM10 */
 1091 #ifdef CPU_ARM10
 1092         if (/* cputype == CPU_ID_ARM1020T || */
 1093             cputype == CPU_ID_ARM1020E) {
 1094                 /*
 1095                  * Select write-through cacheing (this isn't really an
 1096                  * option on ARM1020T).
 1097                  */
 1098                 cpufuncs = arm10_cpufuncs;
 1099                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
 1100                 get_cachetype_cp15();
 1101                 arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
 1102                 arm10_dcache_sets_max =
 1103                     (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
 1104                     arm10_dcache_sets_inc;
 1105                 arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
 1106                 arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
 1107                 pmap_pte_init_generic();
 1108                 goto out;
 1109         }
 1110 #endif /* CPU_ARM10 */
 1111 #ifdef CPU_SA110
 1112         if (cputype == CPU_ID_SA110) {
 1113                 cpufuncs = sa110_cpufuncs;
 1114                 cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it */
 1115                 get_cachetype_table();
 1116                 pmap_pte_init_sa1();
 1117                 goto out;
 1118         }
 1119 #endif  /* CPU_SA110 */
 1120 #ifdef CPU_SA1100
 1121         if (cputype == CPU_ID_SA1100) {
 1122                 cpufuncs = sa11x0_cpufuncs;
 1123                 cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it  */
 1124                 get_cachetype_table();
 1125                 pmap_pte_init_sa1();
 1126                 /* Use powersave on this CPU. */
 1127                 cpu_do_powersave = 1;
 1128 
 1129                 goto out;
 1130         }
 1131 #endif  /* CPU_SA1100 */
 1132 #ifdef CPU_SA1110
 1133         if (cputype == CPU_ID_SA1110) {
 1134                 cpufuncs = sa11x0_cpufuncs;
 1135                 cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it  */
 1136                 get_cachetype_table();
 1137                 pmap_pte_init_sa1();
 1138                 /* Use powersave on this CPU. */
 1139                 cpu_do_powersave = 1;
 1140 
 1141                 goto out;
 1142         }
 1143 #endif  /* CPU_SA1110 */
 1144 #if defined(CPU_FA526) || defined(CPU_FA626TE)
 1145         if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
 1146                 cpufuncs = fa526_cpufuncs;
 1147                 cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it  */
 1148                 get_cachetype_cp15();
 1149                 pmap_pte_init_generic();
 1150 
 1151                 /* Use powersave on this CPU. */
 1152                 cpu_do_powersave = 1;
 1153 
 1154                 goto out;
 1155         }
 1156 #endif  /* CPU_FA526 || CPU_FA626TE */
 1157 #ifdef CPU_IXP12X0
 1158         if (cputype == CPU_ID_IXP1200) {
 1159                 cpufuncs = ixp12x0_cpufuncs;
 1160                 cpu_reset_needs_v4_MMU_disable = 1;
 1161                 get_cachetype_table();
 1162                 pmap_pte_init_sa1();
 1163                 goto out;
 1164         }
 1165 #endif  /* CPU_IXP12X0 */
 1166 #ifdef CPU_XSCALE_80200
 1167         if (cputype == CPU_ID_80200) {
 1168                 int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
 1169 
 1170                 i80200_icu_init();
 1171 
 1172 #if defined(XSCALE_CCLKCFG)
 1173                 /*
 1174                  * Crank CCLKCFG to maximum legal value.
 1175                  */
 1176                 __asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
 1177                         :
 1178                         : "r" (XSCALE_CCLKCFG));
 1179 #endif
 1180 
 1181                 /*
 1182                  * XXX Disable ECC in the Bus Controller Unit; we
 1183                  * don't really support it, yet.  Clear any pending
 1184                  * error indications.
 1185                  */
 1186                 __asm __volatile("mcr p13, 0, %0, c0, c1, 0"
 1187                         :
 1188                         : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
 1189 
 1190                 cpufuncs = xscale_cpufuncs;
 1191                 /*
 1192                  * i80200 errata: Step-A0 and A1 have a bug where
 1193                  * D$ dirty bits are not cleared on "invalidate by
 1194                  * address".
 1195                  *
 1196                  * Workaround: Clean cache line before invalidating.
 1197                  */
 1198                 if (rev == 0 || rev == 1)
 1199                         cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
 1200 
 1201                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
 1202                 get_cachetype_cp15();
 1203                 pmap_pte_init_xscale();
 1204                 goto out;
 1205         }
 1206 #endif /* CPU_XSCALE_80200 */
 1207 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
 1208         if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
 1209             cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
 1210             cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
 1211                 cpufuncs = xscale_cpufuncs;
 1212                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
 1213                 get_cachetype_cp15();
 1214                 pmap_pte_init_xscale();
 1215                 goto out;
 1216         }
  1217 #endif /* CPU_XSCALE_80321 || CPU_XSCALE_80219 */
 1218 
 1219 #if defined(CPU_XSCALE_81342)
 1220         if (cputype == CPU_ID_81342) {
 1221                 cpufuncs = xscalec3_cpufuncs;
 1222                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
 1223                 get_cachetype_cp15();
 1224                 pmap_pte_init_xscale();
 1225                 goto out;
 1226         }
 1227 #endif /* CPU_XSCALE_81342 */
 1228 #ifdef CPU_XSCALE_PXA2X0
 1229         /* ignore core revision to test PXA2xx CPUs */
 1230         if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
 1231             (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
 1232             (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
 1233 
 1234                 cpufuncs = xscale_cpufuncs;
 1235                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
 1236                 get_cachetype_cp15();
 1237                 pmap_pte_init_xscale();
 1238 
 1239                 /* Use powersave on this CPU. */
 1240                 cpu_do_powersave = 1;
 1241 
 1242                 goto out;
 1243         }
 1244 #endif /* CPU_XSCALE_PXA2X0 */
 1245 #ifdef CPU_XSCALE_IXP425
 1246         if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
 1247             cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
 1248 
 1249                 cpufuncs = xscale_cpufuncs;
 1250                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
 1251                 get_cachetype_cp15();
 1252                 pmap_pte_init_xscale();
 1253 
 1254                 goto out;
 1255         }
 1256 #endif /* CPU_XSCALE_IXP425 */
 1257         /*
 1258          * Bzzzz. And the answer was ...
 1259          */
 1260         panic("No support for this CPU type (%08x) in kernel", cputype);
 1261         return(ARCHITECTURE_NOT_PRESENT);
 1262 out:
 1263         uma_set_align(arm_dcache_align_mask);
 1264         return (0);
 1265 }
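      /*
       * Editor's illustration (not part of the original source): several of
       * the XScale tests above, e.g. the PXA2xx case, mask off the core
       * revision field of the CP15 ID register so that a single compare
       * matches every stepping of a part.  The helper below is a minimal
       * sketch of that idiom; is_pxa2x0() is a made-up name used only for
       * illustration.
       */
      static __inline int
      is_pxa2x0(u_int id)
      {

              id &= ~CPU_ID_XSCALE_COREREV_MASK;      /* ignore the stepping */
              return (id == CPU_ID_PXA250 || id == CPU_ID_PXA27X ||
                  id == CPU_ID_PXA210);
      }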
 1266 
 1267 /*
 1268  * Fixup routines for data and prefetch aborts.
 1269  *
 1270  * Several compile time symbols are used
 1271  *
 1272  * DEBUG_FAULT_CORRECTION - Print debugging information during the
 1273  * correction of registers after a fault.
 1274  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts;
 1275  * when this symbol is defined, late aborts are used.
 1276  */
 1277 
 1278 
 1279 /*
 1280  * Null abort fixup routine.
 1281  * For use when no fixup is required.
 1282  */
 1283 int
 1284 cpufunc_null_fixup(arg)
 1285         void *arg;
 1286 {
 1287         return(ABORT_FIXUP_OK);
 1288 }
 1289 
 1290 
 1291 #if defined(CPU_ARM7TDMI)
 1292 
 1293 #ifdef DEBUG_FAULT_CORRECTION
 1294 #define DFC_PRINTF(x)           printf x
 1295 #define DFC_DISASSEMBLE(x)      disassemble(x)
 1296 #else
 1297 #define DFC_PRINTF(x)           /* nothing */
 1298 #define DFC_DISASSEMBLE(x)      /* nothing */
 1299 #endif
 1300 
 1301 /*
 1302  * "Early" data abort fixup.
 1303  *
 1304  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
 1305  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
 1306  *
 1307  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
 1308  */
 1309 int
 1310 early_abort_fixup(arg)
 1311         void *arg;
 1312 {
 1313         trapframe_t *frame = arg;
 1314         u_int fault_pc;
 1315         u_int fault_instruction;
 1316         int saved_lr = 0;
 1317 
 1318         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
 1319 
 1320                 /* Ok an abort in SVC mode */
 1321 
 1322                 /*
 1323                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
 1324                  * as the fault happened in svc mode but we need it in the
 1325                  * usr slot so we can treat the registers as an array of ints
 1326                  * during fixing.
 1327                  * NOTE: The PC is already in position, but writeback is not
 1328                  * allowed on r15.
 1329                  * Doing it like this is more efficient than trapping this
 1330                  * case in all possible locations in the following fixup code.
 1331                  */
 1332 
 1333                 saved_lr = frame->tf_usr_lr;
 1334                 frame->tf_usr_lr = frame->tf_svc_lr;
 1335 
 1336                 /*
 1337                  * Note the trapframe does not have the SVC r13 so a fault
 1338                  * from an instruction with writeback to r13 in SVC mode is
 1339                  * not allowed. This should not happen as the kstack is
 1340                  * always valid.
 1341                  */
 1342         }
 1343 
 1344         /* Get fault address and status from the CPU */
 1345 
 1346         fault_pc = frame->tf_pc;
 1347         fault_instruction = *((volatile unsigned int *)fault_pc);
 1348 
 1349         /* Decode the fault instruction and fix the registers as needed */
 1350 
 1351         if ((fault_instruction & 0x0e000000) == 0x08000000) {
 1352                 int base;
 1353                 int loop;
 1354                 int count;
 1355                 int *registers = &frame->tf_r0;
 1356 
 1357                 DFC_PRINTF(("LDM/STM\n"));
 1358                 DFC_DISASSEMBLE(fault_pc);
 1359                 if (fault_instruction & (1 << 21)) {
 1360                         DFC_PRINTF(("This instruction must be corrected\n"));
 1361                         base = (fault_instruction >> 16) & 0x0f;
 1362                         if (base == 15)
 1363                                 return ABORT_FIXUP_FAILED;
 1364                         /* Count registers transferred */
 1365                         count = 0;
 1366                         for (loop = 0; loop < 16; ++loop) {
 1367                                 if (fault_instruction & (1<<loop))
 1368                                         ++count;
 1369                         }
 1370                         DFC_PRINTF(("%d registers used\n", count));
 1371                         DFC_PRINTF(("Corrected r%d by %d bytes ",
 1372                                        base, count * 4));
 1373                         if (fault_instruction & (1 << 23)) {
 1374                                 DFC_PRINTF(("down\n"));
 1375                                 registers[base] -= count * 4;
 1376                         } else {
 1377                                 DFC_PRINTF(("up\n"));
 1378                                 registers[base] += count * 4;
 1379                         }
 1380                 }
 1381         } else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
 1382                 int base;
 1383                 int offset;
 1384                 int *registers = &frame->tf_r0;
 1385         
 1386                 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
 1387 
 1388                 DFC_DISASSEMBLE(fault_pc);
 1389 
 1390                 /* Only need to fix registers if write back is turned on */
 1391 
 1392                 if ((fault_instruction & (1 << 21)) != 0) {
 1393                         base = (fault_instruction >> 16) & 0x0f;
 1394                         if (base == 13 &&
 1395                             (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
 1396                                 return ABORT_FIXUP_FAILED;
 1397                         if (base == 15)
 1398                                 return ABORT_FIXUP_FAILED;
 1399 
 1400                         offset = (fault_instruction & 0xff) << 2;
 1401                         DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
 1402                         if ((fault_instruction & (1 << 23)) != 0)
 1403                                 offset = -offset;
 1404                         registers[base] += offset;
 1405                         DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
 1406                 }
 1407         } else if ((fault_instruction & 0x0e000000) == 0x0c000000)
 1408                 return ABORT_FIXUP_FAILED;
 1409 
 1410         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
 1411 
 1412                 /* Ok an abort in SVC mode */
 1413 
 1414                 /*
 1415                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
 1416                  * as the fault happened in svc mode but we need it in the
 1417                  * usr slot so we can treat the registers as an array of ints
 1418                  * during fixing.
 1419                  * NOTE: The PC is already in position, but writeback is not
 1420                  * allowed on r15.
 1421                  * Doing it like this is more efficient than trapping this
 1422                  * case in all possible locations in the prior fixup code.
 1423                  */
 1424 
 1425                 frame->tf_svc_lr = frame->tf_usr_lr;
 1426                 frame->tf_usr_lr = saved_lr;
 1427 
 1428                 /*
 1429                  * Note the trapframe does not have the SVC r13 so a fault
 1430                  * from an instruction with writeback to r13 in SVC mode is
 1431                  * not allowed. This should not happen as the kstack is
 1432                  * always valid.
 1433                  */
 1434         }
 1435 
 1436         return(ABORT_FIXUP_OK);
 1437 }
 1438 #endif  /* CPU_ARM2/250/3/6/7 */
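      /*
       * Editor's illustration (not part of the original source): the LDM/STM
       * branch of early_abort_fixup() above undoes base-register writeback by
       * counting the registers named in bits 0-15 of the faulting instruction
       * and moving the base back by 4 bytes per register, opposite to the
       * direction given by the U bit (bit 23).  The helper below is a minimal
       * sketch of that arithmetic; ldm_stm_writeback_adjust() is a made-up
       * name used only for illustration.
       */
      static __inline int
      ldm_stm_writeback_adjust(u_int insn)
      {
              int bit, count = 0;

              /* Count the registers transferred (bits 0-15). */
              for (bit = 0; bit < 16; bit++) {
                      if (insn & (1U << bit))
                              count++;
              }
              /* U bit set: the base was incremented, so the fixup subtracts. */
              return ((insn & (1 << 23)) ? -(count * 4) : (count * 4));
      }
      /*
       * For example, an aborted "ldmia r1!, {r0, r2, r5}" names three
       * registers with the U bit set, so the adjustment is -12, restoring r1.
       */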
 1439 
 1440 
 1441 #if defined(CPU_ARM7TDMI)
 1442 /*
 1443  * "Late" (base updated) data abort fixup
 1444  *
 1445  * For ARM6 (in late-abort mode) and ARM7.
 1446  *
 1447  * In this model, all data-transfer instructions need fixing up.  We defer
 1448  * LDM, STM, LDC and STC fixup to the early-abort handler.
 1449  */
 1450 int
 1451 late_abort_fixup(arg)
 1452         void *arg;
 1453 {
 1454         trapframe_t *frame = arg;
 1455         u_int fault_pc;
 1456         u_int fault_instruction;
 1457         int saved_lr = 0;
 1458 
 1459         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
 1460 
 1461                 /* Ok an abort in SVC mode */
 1462 
 1463                 /*
 1464                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
 1465                  * as the fault happened in svc mode but we need it in the
 1466                  * usr slot so we can treat the registers as an array of ints
 1467                  * during fixing.
 1468                  * NOTE: The PC is already in position, but writeback is not
 1469                  * allowed on r15.
 1470                  * Doing it like this is more efficient than trapping this
 1471                  * case in all possible locations in the following fixup code.
 1472                  */
 1473 
 1474                 saved_lr = frame->tf_usr_lr;
 1475                 frame->tf_usr_lr = frame->tf_svc_lr;
 1476 
 1477                 /*
 1478                  * Note the trapframe does not have the SVC r13 so a fault
 1479                  * from an instruction with writeback to r13 in SVC mode is
 1480                  * not allowed. This should not happen as the kstack is
 1481                  * always valid.
 1482                  */
 1483         }
 1484 
 1485         /* Get fault address and status from the CPU */
 1486 
 1487         fault_pc = frame->tf_pc;
 1488         fault_instruction = *((volatile unsigned int *)fault_pc);
 1489 
 1490         /* Decode the fault instruction and fix the registers as needed */
 1491 
 1492         /* Was it a swap instruction? */
 1493 
 1494         if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
 1495                 DFC_DISASSEMBLE(fault_pc);
 1496         } else if ((fault_instruction & 0x0c000000) == 0x04000000) {
 1497 
 1498                 /* Was it an ldr/str instruction? */
 1499                 /* This is for late abort only */
 1500 
 1501                 int base;
 1502                 int offset;
 1503                 int *registers = &frame->tf_r0;
 1504 
 1505                 DFC_DISASSEMBLE(fault_pc);
 1506                 
 1507                 /* This is for late abort only */
 1508 
 1509                 if ((fault_instruction & (1 << 24)) == 0
 1510                     || (fault_instruction & (1 << 21)) != 0) {  
 1511                         /* postindexed ldr/str with no writeback */
 1512 
 1513                         base = (fault_instruction >> 16) & 0x0f;
 1514                         if (base == 13 &&
 1515                             (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
 1516                                 return ABORT_FIXUP_FAILED;
 1517                         if (base == 15)
 1518                                 return ABORT_FIXUP_FAILED;
 1519                         DFC_PRINTF(("late abt fix: r%d=%08x : ",
 1520                                        base, registers[base]));
 1521                         if ((fault_instruction & (1 << 25)) == 0) {
 1522                                 /* Immediate offset - easy */
 1523 
 1524                                 offset = fault_instruction & 0xfff;
 1525                                 if ((fault_instruction & (1 << 23)))
 1526                                         offset = -offset;
 1527                                 registers[base] += offset;
 1528                                 DFC_PRINTF(("imm=%08x ", offset));
 1529                         } else {
 1530                                 /* offset is a shifted register */
 1531                                 int shift;
 1532 
 1533                                 offset = fault_instruction & 0x0f;
 1534                                 if (offset == base)
 1535                                         return ABORT_FIXUP_FAILED;
 1536 
 1537                                 /*
 1538                                  * Register offset - harder: we have to
 1539                                  * cope with shifts!
 1540                                  */
 1541                                 offset = registers[offset];
 1542 
 1543                                 if ((fault_instruction & (1 << 4)) == 0)
 1544                                         /* shift with amount */
 1545                                         shift = (fault_instruction >> 7) & 0x1f;
 1546                                 else {
 1547                                         /* shift with register */
 1548                                         if ((fault_instruction & (1 << 7)) != 0)
 1549                                                 /* undefined for now so bail out */
 1550                                                 return ABORT_FIXUP_FAILED;
 1551                                         shift = ((fault_instruction >> 8) & 0xf);
 1552                                         if (base == shift)
 1553                                                 return ABORT_FIXUP_FAILED;
 1554                                         DFC_PRINTF(("shift reg=%d ", shift));
 1555                                         shift = registers[shift];
 1556                                 }
 1557                                 DFC_PRINTF(("shift=%08x ", shift));
 1558                                 switch (((fault_instruction >> 5) & 0x3)) {
 1559                                 case 0 : /* Logical left */
 1560                                         offset = (int)(((u_int)offset) << shift);
 1561                                         break;
 1562                                 case 1 : /* Logical Right */
 1563                                         if (shift == 0) shift = 32;
 1564                                         offset = (int)(((u_int)offset) >> shift);
 1565                                         break;
 1566                                 case 2 : /* Arithmetic Right */
 1567                                         if (shift == 0) shift = 32;
 1568                                         offset = (int)(((int)offset) >> shift);
 1569                                         break;
 1570                                 case 3 : /* Rotate right (rol or rxx) */
 1571                                         return ABORT_FIXUP_FAILED;
 1572                                         break;
 1573                                 }
 1574 
 1575                                 DFC_PRINTF(("abt: fixed LDR/STR with "
 1576                                                "register offset\n"));
 1577                                 if ((fault_instruction & (1 << 23)))
 1578                                         offset = -offset;
 1579                                 DFC_PRINTF(("offset=%08x ", offset));
 1580                                 registers[base] += offset;
 1581                         }
 1582                         DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
 1583                 }
 1584         }
 1585 
 1586         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
 1587 
 1588                 /* Ok an abort in SVC mode */
 1589 
 1590                 /*
 1591                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
 1592                  * as the fault happened in svc mode but we need it in the
 1593                  * usr slot so we can treat the registers as an array of ints
 1594                  * during fixing.
 1595                  * NOTE: The PC is already in position, but writeback is not
 1596                  * allowed on r15.
 1597                  * Doing it like this is more efficient than trapping this
 1598                  * case in all possible locations in the prior fixup code.
 1599                  */
 1600 
 1601                 frame->tf_svc_lr = frame->tf_usr_lr;
 1602                 frame->tf_usr_lr = saved_lr;
 1603 
 1604                 /*
 1605                  * Note the trapframe does not have the SVC r13 so a fault
 1606                  * from an instruction with writeback to r13 in SVC mode is
 1607                  * not allowed. This should not happen as the kstack is
 1608                  * always valid.
 1609                  */
 1610         }
 1611 
 1612         /*
 1613          * Now let the early-abort fixup routine have a go, in case it
 1614          * was an LDM, STM, LDC or STC that faulted.
 1615          */
 1616 
 1617         return early_abort_fixup(arg);
 1618 }
 1619 #endif  /* CPU_ARM7TDMI */
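      /*
       * Editor's illustration (not part of the original source): in the
       * register-offset LDR/STR case handled by late_abort_fixup() above, the
       * offset is Rm shifted by either an immediate amount (bits 7-11) or a
       * register (bits 8-11), with the shift type in bits 5-6.  The sketch
       * below covers only the immediate-shift form; decode_imm_shift_offset()
       * is a made-up name, and rm_value stands in for the faulting frame's
       * register contents.
       */
      static __inline int
      decode_imm_shift_offset(u_int insn, u_int rm_value)
      {
              u_int shift = (insn >> 7) & 0x1f;       /* immediate shift amount */

              switch ((insn >> 5) & 0x3) {
              case 0:                                 /* LSL */
                      return ((int)(rm_value << shift));
              case 1:                                 /* LSR; amount 0 encodes 32 */
                      return (shift == 0 ? 0 : (int)(rm_value >> shift));
              case 2:                                 /* ASR; amount 0 encodes 32 */
                      return ((int)rm_value >> (shift == 0 ? 31 : shift));
              default:                                /* ROR/RRX: the fixup gives up */
                      return (0);
              }
      }
      /*
       * The fixup then adds or subtracts this value from the base register,
       * opposite to the direction given by the U bit (bit 23).
       */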
 1620 
 1621 /*
 1622  * CPU Setup code
 1623  */
 1624 
 1625 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
 1626   defined(CPU_ARM9E) || \
 1627   defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||   \
 1628   defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||             \
 1629   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
 1630   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
 1631   defined(CPU_ARM10) ||  defined(CPU_ARM11) || \
 1632   defined(CPU_FA526) || defined(CPU_FA626TE)
 1633 
 1634 #define IGN     0
 1635 #define OR      1
 1636 #define BIC     2
 1637 
 1638 struct cpu_option {
 1639         char    *co_name;
 1640         int     co_falseop;
 1641         int     co_trueop;
 1642         int     co_value;
 1643 };
 1644 
 1645 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
 1646 
 1647 static u_int
 1648 parse_cpu_options(args, optlist, cpuctrl)
 1649         char *args;
 1650         struct cpu_option *optlist;
 1651         u_int cpuctrl;
 1652 {
 1653         int integer;
 1654 
 1655         if (args == NULL)
 1656                 return(cpuctrl);
 1657 
 1658         while (optlist->co_name) {
 1659                 if (get_bootconf_option(args, optlist->co_name,
 1660                     BOOTOPT_TYPE_BOOLEAN, &integer)) {
 1661                         if (integer) {
 1662                                 if (optlist->co_trueop == OR)
 1663                                         cpuctrl |= optlist->co_value;
 1664                                 else if (optlist->co_trueop == BIC)
 1665                                         cpuctrl &= ~optlist->co_value;
 1666                         } else {
 1667                                 if (optlist->co_falseop == OR)
 1668                                         cpuctrl |= optlist->co_value;
 1669                                 else if (optlist->co_falseop == BIC)
 1670                                         cpuctrl &= ~optlist->co_value;
 1671                         }
 1672                 }
 1673                 ++optlist;
 1674         }
 1675         return(cpuctrl);
 1676 }
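      /*
       * Editor's illustration (not part of the original source): each
       * cpu_option entry tells parse_cpu_options() what to do with the
       * control-register bits in co_value when the boot option is true
       * (co_trueop) or false (co_falseop).  The helper below restates that
       * truth table for a single entry; apply_cpu_option() is a made-up name
       * used only for illustration.
       */
      static __inline u_int
      apply_cpu_option(const struct cpu_option *co, int value, u_int cpuctrl)
      {
              int op = value ? co->co_trueop : co->co_falseop;

              if (op == OR)
                      cpuctrl |= co->co_value;        /* set the bits */
              else if (op == BIC)
                      cpuctrl &= ~co->co_value;       /* clear the bits */
              /* IGN leaves cpuctrl untouched. */
              return (cpuctrl);
      }
      /*
       * For instance, a true "cpu.nocache" option in the arm678_options table
       * below clears CPU_CONTROL_IDC_ENABLE from the control word.
       */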
 1677 #endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE */
 1678 
 1679 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
 1680 struct cpu_option arm678_options[] = {
 1681 #ifdef COMPAT_12
 1682         { "nocache",            IGN, BIC, CPU_CONTROL_IDC_ENABLE },
 1683         { "nowritebuf",         IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
 1684 #endif  /* COMPAT_12 */
 1685         { "cpu.cache",          BIC, OR,  CPU_CONTROL_IDC_ENABLE },
 1686         { "cpu.nocache",        OR,  BIC, CPU_CONTROL_IDC_ENABLE },
 1687         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1688         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 1689         { NULL,                 IGN, IGN, 0 }
 1690 };
 1691 
 1692 #endif  /* CPU_ARM7TDMI || CPU_ARM8 */
 1693 
 1694 #ifdef CPU_ARM7TDMI
 1695 struct cpu_option arm7tdmi_options[] = {
 1696         { "arm7.cache",         BIC, OR,  CPU_CONTROL_IDC_ENABLE },
 1697         { "arm7.nocache",       OR,  BIC, CPU_CONTROL_IDC_ENABLE },
 1698         { "arm7.writebuf",      BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1699         { "arm7.nowritebuf",    OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 1700 #ifdef COMPAT_12
 1701         { "fpaclk2",            BIC, OR,  CPU_CONTROL_CPCLK },
 1702 #endif  /* COMPAT_12 */
 1703         { "arm700.fpaclk",      BIC, OR,  CPU_CONTROL_CPCLK },
 1704         { NULL,                 IGN, IGN, 0 }
 1705 };
 1706 
 1707 void
 1708 arm7tdmi_setup(args)
 1709         char *args;
 1710 {
 1711         int cpuctrl;
 1712 
 1713         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1714                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1715                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
 1716 
 1717         cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
 1718         cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
 1719 
 1720 #ifdef __ARMEB__
 1721         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 1722 #endif
 1723 
 1724         /* Clear out the cache */
 1725         cpu_idcache_wbinv_all();
 1726 
 1727         /* Set the control register */
 1728         ctrl = cpuctrl;
 1729         cpu_control(0xffffffff, cpuctrl);
 1730 }
 1731 #endif  /* CPU_ARM7TDMI */
 1732 
 1733 #ifdef CPU_ARM8
 1734 struct cpu_option arm8_options[] = {
 1735         { "arm8.cache",         BIC, OR,  CPU_CONTROL_IDC_ENABLE },
 1736         { "arm8.nocache",       OR,  BIC, CPU_CONTROL_IDC_ENABLE },
 1737         { "arm8.writebuf",      BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1738         { "arm8.nowritebuf",    OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 1739 #ifdef COMPAT_12
 1740         { "branchpredict",      BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 1741 #endif  /* COMPAT_12 */
 1742         { "cpu.branchpredict",  BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 1743         { "arm8.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 1744         { NULL,                 IGN, IGN, 0 }
 1745 };
 1746 
 1747 void
 1748 arm8_setup(args)
 1749         char *args;
 1750 {
 1751         int integer;
 1752         int cpuctrl, cpuctrlmask;
 1753         int clocktest;
 1754         int setclock = 0;
 1755 
 1756         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1757                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1758                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
 1759         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1760                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1761                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
 1762                  | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
 1763                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
 1764 
 1765 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 1766         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 1767 #endif
 1768 
 1769         cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
 1770         cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
 1771 
 1772 #ifdef __ARMEB__
 1773         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 1774 #endif
 1775 
 1776         /* Get clock configuration */
 1777         clocktest = arm8_clock_config(0, 0) & 0x0f;
 1778 
 1779         /* Special ARM8 clock and test configuration */
 1780         if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
 1781                 clocktest = 0;
 1782                 setclock = 1;
 1783         }
 1784         if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
 1785                 if (integer)
 1786                         clocktest |= 0x01;
 1787                 else
 1788                         clocktest &= ~(0x01);
 1789                 setclock = 1;
 1790         }
 1791         if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
 1792                 if (integer)
 1793                         clocktest |= 0x02;
 1794                 else
 1795                         clocktest &= ~(0x02);
 1796                 setclock = 1;
 1797         }
 1798         if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
 1799                 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
 1800                 setclock = 1;
 1801         }
 1802         if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
 1803                 clocktest |= (integer & 7) << 5;
 1804                 setclock = 1;
 1805         }
 1806         
 1807         /* Clear out the cache */
 1808         cpu_idcache_wbinv_all();
 1809 
 1810         /* Set the control register */
 1811         ctrl = cpuctrl;
 1812         cpu_control(0xffffffff, cpuctrl);
 1813 
 1814         /* Set the clock/test register */
 1815         if (setclock)
 1816                 arm8_clock_config(0x7f, clocktest);
 1817 }
 1818 #endif  /* CPU_ARM8 */
 1819 
 1820 #ifdef CPU_ARM9
 1821 struct cpu_option arm9_options[] = {
 1822         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1823         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1824         { "arm9.cache", BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1825         { "arm9.icache",        BIC, OR,  CPU_CONTROL_IC_ENABLE },
 1826         { "arm9.dcache",        BIC, OR,  CPU_CONTROL_DC_ENABLE },
 1827         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1828         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 1829         { "arm9.writebuf",      BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1830         { NULL,                 IGN, IGN, 0 }
 1831 };
 1832 
 1833 void
 1834 arm9_setup(args)
 1835         char *args;
 1836 {
 1837         int cpuctrl, cpuctrlmask;
 1838 
 1839         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1840             | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1841             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1842             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
 1843             CPU_CONTROL_ROUNDROBIN;
 1844         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1845                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1846                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1847                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 1848                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 1849                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
 1850                  | CPU_CONTROL_ROUNDROBIN;
 1851 
 1852 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 1853         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 1854 #endif
 1855 
 1856         cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
 1857 
 1858 #ifdef __ARMEB__
 1859         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 1860 #endif
 1861         if (vector_page == ARM_VECTORS_HIGH)
 1862                 cpuctrl |= CPU_CONTROL_VECRELOC;
 1863 
 1864         /* Clear out the cache */
 1865         cpu_idcache_wbinv_all();
 1866 
 1867         /* Set the control register */
 1868         cpu_control(cpuctrlmask, cpuctrl);
 1869         ctrl = cpuctrl;
 1870 
 1871 }
 1872 #endif  /* CPU_ARM9 */
 1873 
 1874 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
 1875 struct cpu_option arm10_options[] = {
 1876         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1877         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1878         { "arm10.cache",        BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1879         { "arm10.icache",       BIC, OR,  CPU_CONTROL_IC_ENABLE },
 1880         { "arm10.dcache",       BIC, OR,  CPU_CONTROL_DC_ENABLE },
 1881         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1882         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 1883         { "arm10.writebuf",     BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1884         { NULL,                 IGN, IGN, 0 }
 1885 };
 1886 
 1887 void
 1888 arm10_setup(args)
 1889         char *args;
 1890 {
 1891         int cpuctrl, cpuctrlmask;
 1892 
 1893         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
 1894             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1895             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
 1896         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
 1897             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1898             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 1899             | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 1900             | CPU_CONTROL_BPRD_ENABLE
 1901             | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
 1902 
 1903 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 1904         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 1905 #endif
 1906 
 1907         cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
 1908 
 1909 #ifdef __ARMEB__
 1910         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 1911 #endif
 1912 
 1913         /* Clear out the cache */
 1914         cpu_idcache_wbinv_all();
 1915 
 1916         /* Now really make sure they are clean.  */
 1917         __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
 1918 
 1919         if (vector_page == ARM_VECTORS_HIGH)
 1920                 cpuctrl |= CPU_CONTROL_VECRELOC;
 1921 
 1922         /* Set the control register */
 1923         ctrl = cpuctrl;
 1924         cpu_control(0xffffffff, cpuctrl);
 1925 
 1926         /* And again. */
 1927         cpu_idcache_wbinv_all();
 1928 }
 1929 #endif  /* CPU_ARM9E || CPU_ARM10 */
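      /*
       * Editor's note (illustration, not part of the original source): the
       * "mcr p15, 0, r0, c7, c7, 0" used by arm10_setup() above (and by
       * arm11_setup() below) is the CP15 c7/c7 "invalidate both I and D
       * caches" operation; the source register value is ignored.  A
       * standalone wrapper could look like the sketch below;
       * invalidate_both_caches() is a made-up name used only for
       * illustration.
       */
      static __inline void
      invalidate_both_caches(void)
      {
              u_int zero = 0;

              __asm __volatile("mcr p15, 0, %0, c7, c7, 0" : : "r" (zero));
      }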
 1930 
 1931 #ifdef CPU_ARM11
 1932 struct cpu_option arm11_options[] = {
 1933         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1934         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1935         { "arm11.cache",        BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1936         { "arm11.icache",       BIC, OR,  CPU_CONTROL_IC_ENABLE },
 1937         { "arm11.dcache",       BIC, OR,  CPU_CONTROL_DC_ENABLE },
 1938         { NULL,                 IGN, IGN, 0 }
 1939 };
 1940 
 1941 void
 1942 arm11_setup(args)
 1943         char *args;
 1944 {
 1945         int cpuctrl, cpuctrlmask;
 1946 
 1947         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
 1948             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1949             /* | CPU_CONTROL_BPRD_ENABLE */;
 1950         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
 1951             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1952             | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
 1953             | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 1954             | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
 1955 
 1956 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 1957         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 1958 #endif
 1959 
 1960         cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
 1961 
 1962 #ifdef __ARMEB__
 1963         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 1964 #endif
 1965 
 1966         /* Clear out the cache */
 1967         cpu_idcache_wbinv_all();
 1968 
 1969         /* Now really make sure they are clean.  */
 1970         __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
 1971 
 1972         /* Set the control register */
 1973         curcpu()->ci_ctrl = cpuctrl;
 1974         cpu_control(0xffffffff, cpuctrl);
 1975 
 1976         /* And again. */
 1977         cpu_idcache_wbinv_all();
 1978 }
 1979 #endif  /* CPU_ARM11 */
 1980 
 1981 #ifdef CPU_SA110
 1982 struct cpu_option sa110_options[] = {
 1983 #ifdef COMPAT_12
 1984         { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1985         { "nowritebuf",         IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
 1986 #endif  /* COMPAT_12 */
 1987         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1988         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1989         { "sa110.cache",        BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1990         { "sa110.icache",       BIC, OR,  CPU_CONTROL_IC_ENABLE },
 1991         { "sa110.dcache",       BIC, OR,  CPU_CONTROL_DC_ENABLE },
 1992         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1993         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 1994         { "sa110.writebuf",     BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1995         { NULL,                 IGN, IGN, 0 }
 1996 };
 1997 
 1998 void
 1999 sa110_setup(args)
 2000         char *args;
 2001 {
 2002         int cpuctrl, cpuctrlmask;
 2003 
 2004         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2005                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2006                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2007                  | CPU_CONTROL_WBUF_ENABLE;
 2008         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2009                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2010                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2011                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 2012                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 2013                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
 2014                  | CPU_CONTROL_CPCLK;
 2015 
 2016 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 2017         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 2018 #endif
 2019 
 2020         cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
 2021 
 2022 #ifdef __ARMEB__
 2023         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 2024 #endif
 2025 
 2026         /* Clear out the cache */
 2027         cpu_idcache_wbinv_all();
 2028 
 2029         /* Set the control register */
 2030         ctrl = cpuctrl;
 2031 /*      cpu_control(cpuctrlmask, cpuctrl);*/
 2032         cpu_control(0xffffffff, cpuctrl);
 2033 
 2034         /*
 2035          * Enable clock switching.  Note that this doesn't read or write
 2036          * r0; r0 is only named to make the asm valid.
 2037          */
 2038         __asm ("mcr 15, 0, r0, c15, c1, 2");
 2039 }
 2040 #endif  /* CPU_SA110 */
 2041 
 2042 #if defined(CPU_SA1100) || defined(CPU_SA1110)
 2043 struct cpu_option sa11x0_options[] = {
 2044 #ifdef COMPAT_12
 2045         { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2046         { "nowritebuf",         IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
 2047 #endif  /* COMPAT_12 */
 2048         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2049         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2050         { "sa11x0.cache",       BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2051         { "sa11x0.icache",      BIC, OR,  CPU_CONTROL_IC_ENABLE },
 2052         { "sa11x0.dcache",      BIC, OR,  CPU_CONTROL_DC_ENABLE },
 2053         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 2054         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 2055         { "sa11x0.writebuf",    BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 2056         { NULL,                 IGN, IGN, 0 }
 2057 };
 2058 
 2059 void
 2060 sa11x0_setup(args)
 2061         char *args;
 2062 {
 2063         int cpuctrl, cpuctrlmask;
 2064 
 2065         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2066                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2067                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2068                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
 2069         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2070                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2071                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2072                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 2073                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 2074                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
 2075                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
 2076 
 2077 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 2078         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 2079 #endif
 2080 
 2081 
 2082         cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
 2083 
 2084 #ifdef __ARMEB__
 2085         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 2086 #endif
 2087 
 2088         if (vector_page == ARM_VECTORS_HIGH)
 2089                 cpuctrl |= CPU_CONTROL_VECRELOC;
 2090         /* Clear out the cache */
 2091         cpu_idcache_wbinv_all();
 2092         /* Set the control register */
 2093         ctrl = cpuctrl;
 2094         cpu_control(0xffffffff, cpuctrl);
 2095 }
 2096 #endif  /* CPU_SA1100 || CPU_SA1110 */
 2097 
 2098 #if defined(CPU_FA526) || defined(CPU_FA626TE)
 2099 struct cpu_option fa526_options[] = {
 2100 #ifdef COMPAT_12
 2101         { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE |
 2102                                            CPU_CONTROL_DC_ENABLE) },
 2103         { "nowritebuf",         IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
 2104 #endif  /* COMPAT_12 */
 2105         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE |
 2106                                            CPU_CONTROL_DC_ENABLE) },
 2107         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE |
 2108                                            CPU_CONTROL_DC_ENABLE) },
 2109         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 2110         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 2111         { NULL,                 IGN, IGN, 0 }
 2112 };
 2113 
 2114 void
 2115 fa526_setup(char *args)
 2116 {
 2117         int cpuctrl, cpuctrlmask;
 2118 
 2119         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2120                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2121                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2122                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
 2123                 | CPU_CONTROL_BPRD_ENABLE;
 2124         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2125                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2126                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2127                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 2128                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 2129                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
 2130                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
 2131 
 2132 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 2133         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 2134 #endif
 2135 
 2136         cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
 2137 
 2138 #ifdef __ARMEB__
 2139         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 2140 #endif
 2141 
 2142         if (vector_page == ARM_VECTORS_HIGH)
 2143                 cpuctrl |= CPU_CONTROL_VECRELOC;
 2144 
 2145         /* Clear out the cache */
 2146         cpu_idcache_wbinv_all();
 2147 
 2148         /* Set the control register */
 2149         ctrl = cpuctrl;
 2150         cpu_control(0xffffffff, cpuctrl);
 2151 }
 2152 #endif  /* CPU_FA526 || CPU_FA626TE */
 2153 
 2154 
 2155 #if defined(CPU_IXP12X0)
 2156 struct cpu_option ixp12x0_options[] = {
 2157         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2158         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2159         { "ixp12x0.cache",      BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2160         { "ixp12x0.icache",     BIC, OR,  CPU_CONTROL_IC_ENABLE },
 2161         { "ixp12x0.dcache",     BIC, OR,  CPU_CONTROL_DC_ENABLE },
 2162         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 2163         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 2164         { "ixp12x0.writebuf",   BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 2165         { NULL,                 IGN, IGN, 0 }
 2166 };
 2167 
 2168 void
 2169 ixp12x0_setup(args)
 2170         char *args;
 2171 {
 2172         int cpuctrl, cpuctrlmask;
 2173 
 2174 
 2175         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
 2176                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
 2177                  | CPU_CONTROL_IC_ENABLE;
 2178 
 2179         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
 2180                  | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
 2181                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
 2182                  | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
 2183                  | CPU_CONTROL_VECRELOC;
 2184 
 2185 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 2186         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 2187 #endif
 2188 
 2189         cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
 2190 
 2191 #ifdef __ARMEB__
 2192         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 2193 #endif
 2194 
 2195         if (vector_page == ARM_VECTORS_HIGH)
 2196                 cpuctrl |= CPU_CONTROL_VECRELOC;
 2197 
 2198         /* Clear out the cache */
 2199         cpu_idcache_wbinv_all();
 2200 
 2201         /* Set the control register */
 2202         ctrl = cpuctrl;
 2203         /* cpu_control(0xffffffff, cpuctrl); */
 2204         cpu_control(cpuctrlmask, cpuctrl);
 2205 }
 2206 #endif /* CPU_IXP12X0 */
 2207 
 2208 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
 2209   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
 2210   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
 2211 struct cpu_option xscale_options[] = {
 2212 #ifdef COMPAT_12
 2213         { "branchpredict",      BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 2214         { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2215 #endif  /* COMPAT_12 */
 2216         { "cpu.branchpredict",  BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 2217         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2218         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2219         { "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 2220         { "xscale.cache",       BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 2221         { "xscale.icache",      BIC, OR,  CPU_CONTROL_IC_ENABLE },
 2222         { "xscale.dcache",      BIC, OR,  CPU_CONTROL_DC_ENABLE },
 2223         { NULL,                 IGN, IGN, 0 }
 2224 };
 2225 
 2226 void
 2227 xscale_setup(args)
 2228         char *args;
 2229 {
 2230         uint32_t auxctl;
 2231         int cpuctrl, cpuctrlmask;
 2232 
 2233         /*
 2234          * The XScale Write Buffer is always enabled.  Our option
 2235          * is to enable/disable coalescing.  Note that bits 6:3
 2236          * must always be enabled.
 2237          */
 2238 
 2239         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2240                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2241                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2242                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
 2243                  | CPU_CONTROL_BPRD_ENABLE;
 2244         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 2245                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 2246                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 2247                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 2248                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 2249                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
 2250                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
 2251                  CPU_CONTROL_L2_ENABLE;
 2252 
 2253 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 2254         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 2255 #endif
 2256 
 2257         cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
 2258 
 2259 #ifdef __ARMEB__
 2260         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 2261 #endif
 2262 
 2263         if (vector_page == ARM_VECTORS_HIGH)
 2264                 cpuctrl |= CPU_CONTROL_VECRELOC;
 2265 #ifdef CPU_XSCALE_CORE3
 2266         cpuctrl |= CPU_CONTROL_L2_ENABLE;
 2267 #endif
 2268 
 2269         /* Clear out the cache */
 2270         cpu_idcache_wbinv_all();
 2271 
 2272         /*
 2273          * Set the control register.  Note that bits 6:3 must always
 2274          * be set to 1.
 2275          */
 2276         ctrl = cpuctrl;
 2277 /*      cpu_control(cpuctrlmask, cpuctrl);*/
 2278         cpu_control(0xffffffff, cpuctrl);
 2279 
 2280         /* Configure write coalescing (on unless XSCALE_NO_COALESCE_WRITES). */
 2281         __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
 2282                 : "=r" (auxctl));
 2283 #ifdef XSCALE_NO_COALESCE_WRITES
 2284         auxctl |= XSCALE_AUXCTL_K;
 2285 #else
 2286         auxctl &= ~XSCALE_AUXCTL_K;
 2287 #endif
 2288 #ifdef CPU_XSCALE_CORE3
 2289         auxctl |= XSCALE_AUXCTL_LLR;
 2290         auxctl |= XSCALE_AUXCTL_MD_MASK;
 2291 #endif
 2292         __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
 2293                 : : "r" (auxctl));
 2294 }
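      /*
       * Editor's illustration (not part of the original source): the tail of
       * xscale_setup() above reads the XScale auxiliary control register
       * (CP15 register 1, opcode2 1) and sets or clears the K bit, which
       * disables or enables write-buffer coalescing.  The helper below is a
       * minimal sketch of that read-modify-write; xscale_write_coalescing()
       * is a made-up name used only for illustration.
       */
      static __inline void
      xscale_write_coalescing(int enable)
      {
              uint32_t auxctl;

              __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
              if (enable)
                      auxctl &= ~XSCALE_AUXCTL_K;     /* clear K: coalescing on */
              else
                      auxctl |= XSCALE_AUXCTL_K;      /* set K: coalescing off */
              __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
      }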
 2295 #endif  /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 ||
 2296            CPU_XSCALE_80219 || CPU_XSCALE_81342 */
