FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/cpufunc.c


    1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
    2 
    3 /*-
    4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
    5  * arm8 support code Copyright (c) 1997 ARM Limited
    6  * arm8 support code Copyright (c) 1997 Causality Limited
    7  * arm9 support code Copyright (C) 2001 ARM Ltd
    8  * Copyright (c) 1997 Mark Brinicombe.
    9  * Copyright (c) 1997 Causality Limited
   10  * All rights reserved.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  * 3. All advertising materials mentioning features or use of this software
   21  *    must display the following acknowledgement:
   22  *      This product includes software developed by Causality Limited.
   23  * 4. The name of Causality Limited may not be used to endorse or promote
   24  *    products derived from this software without specific prior written
   25  *    permission.
   26  *
   27  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
   28  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   29  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   30  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
   31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   37  * SUCH DAMAGE.
   38  *
   39  * RiscBSD kernel project
   40  *
   41  * cpufuncs.c
   42  *
   43  * C functions for supporting CPU / MMU / TLB specific operations.
   44  *
   45  * Created      : 30/01/97
   46  */
   47 #include <sys/cdefs.h>
   48 __FBSDID("$FreeBSD$");
   49 
   50 #include <sys/cdefs.h>
   51 
   52 #include <sys/types.h>
   53 #include <sys/param.h>
   54 #include <sys/systm.h>
   55 #include <sys/lock.h>
   56 #include <sys/mutex.h>
   57 #include <sys/bus.h>
   58 #include <machine/bus.h>
   59 #include <machine/cpu.h>
   60 #include <machine/disassem.h>
   61 
   62 #include <vm/vm.h>
   63 #include <vm/pmap.h>
   64 
   65 #include <machine/cpuconf.h>
   66 #include <machine/cpufunc.h>
   67 #include <machine/bootconfig.h>
   68 
   69 #ifdef CPU_XSCALE_80200
   70 #include <arm/xscale/i80200/i80200reg.h>
   71 #include <arm/xscale/i80200/i80200var.h>
   72 #endif
   73 
   74 #ifdef CPU_XSCALE_80321
   75 #include <arm/xscale/i80321/i80321reg.h>
   76 #include <arm/xscale/i80321/i80321var.h>
   77 #endif
   78 
   79 #ifdef CPU_XSCALE_IXP425
   80 #include <arm/xscale/ixp425/ixp425reg.h>
   81 #include <arm/xscale/ixp425/ixp425var.h>
   82 #endif
   83 
   84 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
   85 #include <arm/xscale/xscalereg.h>
   86 #endif
   87 
   88 #if defined(PERFCTRS)
   89 struct arm_pmc_funcs *arm_pmc;
   90 #endif
   91 
   92 /* PRIMARY CACHE VARIABLES */
   93 int     arm_picache_size;
   94 int     arm_picache_line_size;
   95 int     arm_picache_ways;
   96 
   97 int     arm_pdcache_size;       /* and unified */
   98 int     arm_pdcache_line_size;
   99 int     arm_pdcache_ways;
  100 
  101 int     arm_pcache_type;
  102 int     arm_pcache_unified;
  103 
  104 int     arm_dcache_align;
  105 int     arm_dcache_align_mask;
  106 
  107 /* 1 == use cpu_sleep(), 0 == don't */
  108 int cpu_do_powersave;
  109 int ctrl;               /* saved CPU control register value */
  110 
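A brief sketch of how the alignment values above are consumed: callers round a virtual-address range out to whole cache lines before invoking one of the ranged cache operations installed in the cpufuncs table further down. The helper below is hypothetical (not part of this file); the cf_ member naming follows the cf_dcache_inv_range assignment made later in set_cpufuncs().

/*
 * Round [va, va + len) out to cache-line boundaries and write back and
 * invalidate the covered lines through the per-CPU function table.
 */
static __inline void
dcache_wbinv_span(vm_offset_t va, vm_size_t len)
{
	vm_offset_t sva = va & ~arm_dcache_align_mask;
	vm_size_t span = ((va + len + arm_dcache_align_mask) &
	    ~arm_dcache_align_mask) - sva;

	cpufuncs.cf_dcache_wbinv_range(sva, span);
}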
  111 #ifdef CPU_ARM7TDMI
  112 struct cpu_functions arm7tdmi_cpufuncs = {
  113         /* CPU functions */
  114         
  115         cpufunc_id,                     /* id                   */
  116         cpufunc_nullop,                 /* cpwait               */
  117 
  118         /* MMU functions */
  119 
  120         cpufunc_control,                /* control              */
  121         cpufunc_domains,                /* domain               */
  122         arm7tdmi_setttb,                /* setttb               */
  123         cpufunc_faultstatus,            /* faultstatus          */
  124         cpufunc_faultaddress,           /* faultaddress         */
  125 
  126         /* TLB functions */
  127 
  128         arm7tdmi_tlb_flushID,           /* tlb_flushID          */
  129         arm7tdmi_tlb_flushID_SE,        /* tlb_flushID_SE       */
  130         arm7tdmi_tlb_flushID,           /* tlb_flushI           */
  131         arm7tdmi_tlb_flushID_SE,        /* tlb_flushI_SE        */
  132         arm7tdmi_tlb_flushID,           /* tlb_flushD           */
  133         arm7tdmi_tlb_flushID_SE,        /* tlb_flushD_SE        */
  134 
  135         /* Cache operations */
  136 
  137         cpufunc_nullop,                 /* icache_sync_all      */
  138         (void *)cpufunc_nullop,         /* icache_sync_range    */
  139 
  140         arm7tdmi_cache_flushID,         /* dcache_wbinv_all     */
  141         (void *)arm7tdmi_cache_flushID, /* dcache_wbinv_range   */
  142         (void *)arm7tdmi_cache_flushID, /* dcache_inv_range     */
  143         (void *)cpufunc_nullop,         /* dcache_wb_range      */
  144 
  145         arm7tdmi_cache_flushID,         /* idcache_wbinv_all    */
  146         (void *)arm7tdmi_cache_flushID, /* idcache_wbinv_range  */
  147 
  148         /* Other functions */
  149 
  150         cpufunc_nullop,                 /* flush_prefetchbuf    */
  151         cpufunc_nullop,                 /* drain_writebuf       */
  152         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  153         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  154 
  155         (void *)cpufunc_nullop,         /* sleep                */
  156 
  157         /* Soft functions */
  158 
  159         late_abort_fixup,               /* dataabt_fixup        */
  160         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  161 
  162         arm7tdmi_context_switch,        /* context_switch       */
  163 
  164         arm7tdmi_setup                  /* cpu setup            */
  165 
  166 };
  167 #endif  /* CPU_ARM7TDMI */
  168 
  169 #ifdef CPU_ARM8
  170 struct cpu_functions arm8_cpufuncs = {
  171         /* CPU functions */
  172         
  173         cpufunc_id,                     /* id                   */
  174         cpufunc_nullop,                 /* cpwait               */
  175 
  176         /* MMU functions */
  177 
  178         cpufunc_control,                /* control              */
  179         cpufunc_domains,                /* domain               */
  180         arm8_setttb,                    /* setttb               */
  181         cpufunc_faultstatus,            /* faultstatus          */
  182         cpufunc_faultaddress,           /* faultaddress         */
  183 
  184         /* TLB functions */
  185 
  186         arm8_tlb_flushID,               /* tlb_flushID          */
  187         arm8_tlb_flushID_SE,            /* tlb_flushID_SE       */
  188         arm8_tlb_flushID,               /* tlb_flushI           */
  189         arm8_tlb_flushID_SE,            /* tlb_flushI_SE        */
  190         arm8_tlb_flushID,               /* tlb_flushD           */
  191         arm8_tlb_flushID_SE,            /* tlb_flushD_SE        */
  192 
  193         /* Cache operations */
  194 
  195         cpufunc_nullop,                 /* icache_sync_all      */
  196         (void *)cpufunc_nullop,         /* icache_sync_range    */
  197 
  198         arm8_cache_purgeID,             /* dcache_wbinv_all     */
  199         (void *)arm8_cache_purgeID,     /* dcache_wbinv_range   */
  200 /*XXX*/ (void *)arm8_cache_purgeID,     /* dcache_inv_range     */
  201         (void *)arm8_cache_cleanID,     /* dcache_wb_range      */
  202 
  203         arm8_cache_purgeID,             /* idcache_wbinv_all    */
  204         (void *)arm8_cache_purgeID,     /* idcache_wbinv_range  */
  205 
  206         /* Other functions */
  207 
  208         cpufunc_nullop,                 /* flush_prefetchbuf    */
  209         cpufunc_nullop,                 /* drain_writebuf       */
  210         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  211         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  212 
  213         (void *)cpufunc_nullop,         /* sleep                */
  214 
  215         /* Soft functions */
  216 
  217         cpufunc_null_fixup,             /* dataabt_fixup        */
  218         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  219 
  220         arm8_context_switch,            /* context_switch       */
  221 
  222         arm8_setup                      /* cpu setup            */
  223 };          
  224 #endif  /* CPU_ARM8 */
  225 
  226 #ifdef CPU_ARM9
  227 struct cpu_functions arm9_cpufuncs = {
  228         /* CPU functions */
  229 
  230         cpufunc_id,                     /* id                   */
  231         cpufunc_nullop,                 /* cpwait               */
  232 
  233         /* MMU functions */
  234 
  235         cpufunc_control,                /* control              */
  236         cpufunc_domains,                /* domain               */
  237         arm9_setttb,                    /* setttb               */
  238         cpufunc_faultstatus,            /* faultstatus          */
  239         cpufunc_faultaddress,           /* faultaddress         */
  240 
  241         /* TLB functions */
  242 
  243         armv4_tlb_flushID,              /* tlb_flushID          */
  244         arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
  245         armv4_tlb_flushI,               /* tlb_flushI           */
  246         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  247         armv4_tlb_flushD,               /* tlb_flushD           */
  248         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  249 
  250         /* Cache operations */
  251 
  252         arm9_icache_sync_all,           /* icache_sync_all      */
  253         arm9_icache_sync_range,         /* icache_sync_range    */
  254 
  255         arm9_dcache_wbinv_all,          /* dcache_wbinv_all     */
  256         arm9_dcache_wbinv_range,        /* dcache_wbinv_range   */
  257 /*XXX*/ arm9_dcache_wbinv_range,        /* dcache_inv_range     */
  258         arm9_dcache_wb_range,           /* dcache_wb_range      */
  259 
  260         arm9_idcache_wbinv_all,         /* idcache_wbinv_all    */
  261         arm9_idcache_wbinv_range,       /* idcache_wbinv_range  */
  262 
  263         /* Other functions */
  264 
  265         cpufunc_nullop,                 /* flush_prefetchbuf    */
  266         armv4_drain_writebuf,           /* drain_writebuf       */
  267         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  268         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  269 
  270         (void *)cpufunc_nullop,         /* sleep                */
  271 
  272         /* Soft functions */
  273 
  274         cpufunc_null_fixup,             /* dataabt_fixup        */
  275         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  276 
  277         arm9_context_switch,            /* context_switch       */
  278 
  279         arm9_setup                      /* cpu setup            */
  280 
  281 };
  282 #endif /* CPU_ARM9 */
  283 
  284 #ifdef CPU_ARM10
  285 struct cpu_functions arm10_cpufuncs = {
  286         /* CPU functions */
  287 
  288         cpufunc_id,                     /* id                   */
  289         cpufunc_nullop,                 /* cpwait               */
  290 
  291         /* MMU functions */
  292 
  293         cpufunc_control,                /* control              */
  294         cpufunc_domains,                /* domain               */
  295         arm10_setttb,                   /* setttb               */
  296         cpufunc_faultstatus,            /* faultstatus          */
  297         cpufunc_faultaddress,           /* faultaddress         */
  298 
  299         /* TLB functions */
  300 
  301         armv4_tlb_flushID,              /* tlb_flushID          */
  302         arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
  303         armv4_tlb_flushI,               /* tlb_flushI           */
  304         arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
  305         armv4_tlb_flushD,               /* tlb_flushD           */
  306         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  307 
  308         /* Cache operations */
  309 
  310         arm10_icache_sync_all,          /* icache_sync_all      */
  311         arm10_icache_sync_range,        /* icache_sync_range    */
  312 
  313         arm10_dcache_wbinv_all,         /* dcache_wbinv_all     */
  314         arm10_dcache_wbinv_range,       /* dcache_wbinv_range   */
  315         arm10_dcache_inv_range,         /* dcache_inv_range     */
  316         arm10_dcache_wb_range,          /* dcache_wb_range      */
  317 
  318         arm10_idcache_wbinv_all,        /* idcache_wbinv_all    */
  319         arm10_idcache_wbinv_range,      /* idcache_wbinv_range  */
  320 
  321         /* Other functions */
  322 
  323         cpufunc_nullop,                 /* flush_prefetchbuf    */
  324         armv4_drain_writebuf,           /* drain_writebuf       */
  325         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  326         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  327 
  328         (void *)cpufunc_nullop,         /* sleep                */
  329 
  330         /* Soft functions */
  331 
  332         cpufunc_null_fixup,             /* dataabt_fixup        */
  333         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  334 
  335         arm10_context_switch,           /* context_switch       */
  336 
  337         arm10_setup                     /* cpu setup            */
  338 
  339 };
  340 #endif /* CPU_ARM10 */
  341 
  342 #ifdef CPU_SA110
  343 struct cpu_functions sa110_cpufuncs = {
  344         /* CPU functions */
  345         
  346         cpufunc_id,                     /* id                   */
  347         cpufunc_nullop,                 /* cpwait               */
  348 
  349         /* MMU functions */
  350 
  351         cpufunc_control,                /* control              */
  352         cpufunc_domains,                /* domain               */
  353         sa1_setttb,                     /* setttb               */
  354         cpufunc_faultstatus,            /* faultstatus          */
  355         cpufunc_faultaddress,           /* faultaddress         */
  356 
  357         /* TLB functions */
  358 
  359         armv4_tlb_flushID,              /* tlb_flushID          */
  360         sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
  361         armv4_tlb_flushI,               /* tlb_flushI           */
  362         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  363         armv4_tlb_flushD,               /* tlb_flushD           */
  364         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  365 
  366         /* Cache operations */
  367 
  368         sa1_cache_syncI,                /* icache_sync_all      */
  369         sa1_cache_syncI_rng,            /* icache_sync_range    */
  370 
  371         sa1_cache_purgeD,               /* dcache_wbinv_all     */
  372         sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
  373 /*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
  374         sa1_cache_cleanD_rng,           /* dcache_wb_range      */
  375 
  376         sa1_cache_purgeID,              /* idcache_wbinv_all    */
  377         sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */
  378 
  379         /* Other functions */
  380 
  381         cpufunc_nullop,                 /* flush_prefetchbuf    */
  382         armv4_drain_writebuf,           /* drain_writebuf       */
  383         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  384         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  385 
  386         (void *)cpufunc_nullop,         /* sleep                */
  387 
  388         /* Soft functions */
  389 
  390         cpufunc_null_fixup,             /* dataabt_fixup        */
  391         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  392 
  393         sa110_context_switch,           /* context_switch       */
  394 
  395         sa110_setup                     /* cpu setup            */
  396 };          
  397 #endif  /* CPU_SA110 */
  398 
  399 #if defined(CPU_SA1100) || defined(CPU_SA1110)
  400 struct cpu_functions sa11x0_cpufuncs = {
  401         /* CPU functions */
  402         
  403         cpufunc_id,                     /* id                   */
  404         cpufunc_nullop,                 /* cpwait               */
  405 
  406         /* MMU functions */
  407 
  408         cpufunc_control,                /* control              */
  409         cpufunc_domains,                /* domain               */
  410         sa1_setttb,                     /* setttb               */
  411         cpufunc_faultstatus,            /* faultstatus          */
  412         cpufunc_faultaddress,           /* faultaddress         */
  413 
  414         /* TLB functions */
  415 
  416         armv4_tlb_flushID,              /* tlb_flushID          */
  417         sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
  418         armv4_tlb_flushI,               /* tlb_flushI           */
  419         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  420         armv4_tlb_flushD,               /* tlb_flushD           */
  421         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  422 
  423         /* Cache operations */
  424 
  425         sa1_cache_syncI,                /* icache_sync_all      */
  426         sa1_cache_syncI_rng,            /* icache_sync_range    */
  427 
  428         sa1_cache_purgeD,               /* dcache_wbinv_all     */
  429         sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
  430 /*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
  431         sa1_cache_cleanD_rng,           /* dcache_wb_range      */
  432 
  433         sa1_cache_purgeID,              /* idcache_wbinv_all    */
  434         sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */
  435 
  436         /* Other functions */
  437 
  438         sa11x0_drain_readbuf,           /* flush_prefetchbuf    */
  439         armv4_drain_writebuf,           /* drain_writebuf       */
  440         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  441         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  442 
  443         sa11x0_cpu_sleep,               /* sleep                */
  444 
  445         /* Soft functions */
  446 
  447         cpufunc_null_fixup,             /* dataabt_fixup        */
  448         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  449 
  450         sa11x0_context_switch,          /* context_switch       */
  451 
  452         sa11x0_setup                    /* cpu setup            */
  453 };          
  454 #endif  /* CPU_SA1100 || CPU_SA1110 */
  455 
  456 #ifdef CPU_IXP12X0
  457 struct cpu_functions ixp12x0_cpufuncs = {
  458         /* CPU functions */
  459         
  460         cpufunc_id,                     /* id                   */
  461         cpufunc_nullop,                 /* cpwait               */
  462 
  463         /* MMU functions */
  464 
  465         cpufunc_control,                /* control              */
  466         cpufunc_domains,                /* domain               */
  467         sa1_setttb,                     /* setttb               */
  468         cpufunc_faultstatus,            /* faultstatus          */
  469         cpufunc_faultaddress,           /* faultaddress         */
  470 
  471         /* TLB functions */
  472 
  473         armv4_tlb_flushID,              /* tlb_flushID          */
  474         sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
  475         armv4_tlb_flushI,               /* tlb_flushI           */
  476         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  477         armv4_tlb_flushD,               /* tlb_flushD           */
  478         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  479 
  480         /* Cache operations */
  481 
  482         sa1_cache_syncI,                /* icache_sync_all      */
  483         sa1_cache_syncI_rng,            /* icache_sync_range    */
  484 
  485         sa1_cache_purgeD,               /* dcache_wbinv_all     */
  486         sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
  487 /*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
  488         sa1_cache_cleanD_rng,           /* dcache_wb_range      */
  489 
  490         sa1_cache_purgeID,              /* idcache_wbinv_all    */
  491         sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */
  492 
  493         /* Other functions */
  494 
  495         ixp12x0_drain_readbuf,          /* flush_prefetchbuf    */
  496         armv4_drain_writebuf,           /* drain_writebuf       */
  497         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  498         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  499 
  500         (void *)cpufunc_nullop,         /* sleep                */
  501 
  502         /* Soft functions */
  503 
  504         cpufunc_null_fixup,             /* dataabt_fixup        */
  505         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  506 
  507         ixp12x0_context_switch,         /* context_switch       */
  508 
  509         ixp12x0_setup                   /* cpu setup            */
  510 };          
  511 #endif  /* CPU_IXP12X0 */
  512 
  513 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
  514     defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
  515 struct cpu_functions xscale_cpufuncs = {
  516         /* CPU functions */
  517         
  518         cpufunc_id,                     /* id                   */
  519         xscale_cpwait,                  /* cpwait               */
  520 
  521         /* MMU functions */
  522 
  523         xscale_control,                 /* control              */
  524         cpufunc_domains,                /* domain               */
  525         xscale_setttb,                  /* setttb               */
  526         cpufunc_faultstatus,            /* faultstatus          */
  527         cpufunc_faultaddress,           /* faultaddress         */
  528 
  529         /* TLB functions */
  530 
  531         armv4_tlb_flushID,              /* tlb_flushID          */
  532         xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
  533         armv4_tlb_flushI,               /* tlb_flushI           */
  534         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
  535         armv4_tlb_flushD,               /* tlb_flushD           */
  536         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
  537 
  538         /* Cache operations */
  539 
  540         xscale_cache_syncI,             /* icache_sync_all      */
  541         xscale_cache_syncI_rng,         /* icache_sync_range    */
  542 
  543         xscale_cache_purgeD,            /* dcache_wbinv_all     */
  544         xscale_cache_purgeD_rng,        /* dcache_wbinv_range   */
  545         xscale_cache_flushD_rng,        /* dcache_inv_range     */
  546         xscale_cache_cleanD_rng,        /* dcache_wb_range      */
  547 
  548         xscale_cache_purgeID,           /* idcache_wbinv_all    */
  549         xscale_cache_purgeID_rng,       /* idcache_wbinv_range  */
  550 
  551         /* Other functions */
  552 
  553         cpufunc_nullop,                 /* flush_prefetchbuf    */
  554         armv4_drain_writebuf,           /* drain_writebuf       */
  555         cpufunc_nullop,                 /* flush_brnchtgt_C     */
  556         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
  557 
  558         xscale_cpu_sleep,               /* sleep                */
  559 
  560         /* Soft functions */
  561 
  562         cpufunc_null_fixup,             /* dataabt_fixup        */
  563         cpufunc_null_fixup,             /* prefetchabt_fixup    */
  564 
  565         xscale_context_switch,          /* context_switch       */
  566 
  567         xscale_setup                    /* cpu setup            */
  568 };
  569 #endif
  570 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
  571 
  572 /*
  573  * Global constants also used by locore.s
  574  */
  575 
  576 struct cpu_functions cpufuncs;
  577 u_int cputype;
  578 u_int cpu_reset_needs_v4_MMU_disable;   /* flag used in locore.s */
  579 
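
These per-CPU tables exist so that the rest of the kernel never calls a core-specific routine directly: set_cpufuncs() below copies the table matching the running core into the global cpufuncs, and everything else indirects through it (normally via the cpu_*() wrapper macros in <machine/cpufunc.h>, which are assumed here rather than quoted). A minimal sketch of that dispatch, using the cf_ member naming seen in the cf_dcache_inv_range assignment below:

	/*
	 * E.g. when switching address spaces: load the new translation table
	 * base, then flush stale TLB entries, through whichever core-specific
	 * implementations were installed at boot.  new_l1_paddr is a
	 * placeholder for the new L1 table's physical address.
	 */
	cpufuncs.cf_setttb(new_l1_paddr);
	cpufuncs.cf_tlb_flushID();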
  580 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
  581     defined (CPU_ARM10) || \
  582     defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
  583     defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
  584 static void get_cachetype_cp15(void);
  585 
  586 /* Additional cache information local to this file.  Log2 of some of the
  587    above numbers.  */
  588 static int      arm_dcache_l2_nsets;
  589 static int      arm_dcache_l2_assoc;
  590 static int      arm_dcache_l2_linesize;
  591 
  592 static void
  593 get_cachetype_cp15()
  594 {
  595         u_int ctype, isize, dsize;
  596         u_int multiplier;
  597 
  598         __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
  599                 : "=r" (ctype));
  600 
  601         /*
  602          * ...and thus spake the ARM ARM:
  603          *
  604          * If an <opcode2> value corresponding to an unimplemented or
  605          * reserved ID register is encountered, the System Control
  606          * processor returns the value of the main ID register.
  607          */
  608         if (ctype == cpufunc_id())
  609                 goto out;
  610 
  611         if ((ctype & CPU_CT_S) == 0)
  612                 arm_pcache_unified = 1;
  613 
  614         /*
  615          * If you want to know how this code works, go read the ARM ARM.
  616          */
  617 
  618         arm_pcache_type = CPU_CT_CTYPE(ctype);
  619 
  620         if (arm_pcache_unified == 0) {
  621                 isize = CPU_CT_ISIZE(ctype);
  622                 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
  623                 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
  624                 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
  625                         if (isize & CPU_CT_xSIZE_M)
  626                                 arm_picache_line_size = 0; /* not present */
  627                         else
  628                                 arm_picache_ways = 1;
  629                 } else {
  630                         arm_picache_ways = multiplier <<
  631                             (CPU_CT_xSIZE_ASSOC(isize) - 1);
  632                 }
  633                 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
  634         }
  635 
  636         dsize = CPU_CT_DSIZE(ctype);
  637         multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
  638         arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
  639         if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
  640                 if (dsize & CPU_CT_xSIZE_M)
  641                         arm_pdcache_line_size = 0; /* not present */
  642                 else
  643                         arm_pdcache_ways = 1;
  644         } else {
  645                 arm_pdcache_ways = multiplier <<
  646                     (CPU_CT_xSIZE_ASSOC(dsize) - 1);
  647         }
  648         arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
  649 
  650         arm_dcache_align = arm_pdcache_line_size;
  651 
  652         arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
  653         arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
  654         arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
  655             CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
  656 
  657  out:
  658         arm_dcache_align_mask = arm_dcache_align - 1;
  659 }
  660 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
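
A worked pass through get_cachetype_cp15() makes the shift arithmetic above easier to check. Assume a separate D-cache whose size field decodes as len = 2, M = 0, assoc = 5, size = 6, i.e. a 32 KB, 32-way cache with 32-byte lines (the shape of the XScale D-cache handled later in this file); the expressions above then give:

/*
 *	multiplier             = 2              (M bit clear)
 *	arm_pdcache_line_size  = 1 << (2 + 3)   = 32 bytes
 *	arm_pdcache_ways       = 2 << (5 - 1)   = 32 ways
 *	arm_pdcache_size       = 2 << (6 + 8)   = 32768 bytes
 *	arm_dcache_l2_linesize = 2 + 3          = 5    (log2 of 32)
 *	arm_dcache_l2_assoc    = 5 + 2 - 2      = 5    (log2 of 32)
 *	arm_dcache_l2_nsets    = 6 + 6 - 5 - 2  = 5    (32 sets)
 *
 * Cross-check: 32 sets * 32 ways * 32 bytes/line = 32768 bytes, which
 * matches arm_pdcache_size.
 */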
  661 
  662 #if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
  663     defined(CPU_IXP12X0)
  664 /* Cache information for CPUs without cache type registers. */
  665 struct cachetab {
  666         u_int32_t ct_cpuid;
  667         int     ct_pcache_type;
  668         int     ct_pcache_unified;
  669         int     ct_pdcache_size;
  670         int     ct_pdcache_line_size;
  671         int     ct_pdcache_ways;
  672         int     ct_picache_size;
  673         int     ct_picache_line_size;
  674         int     ct_picache_ways;
  675 };
  676 
  677 struct cachetab cachetab[] = {
  678     /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
  679     /* XXX is this type right for SA-1? */
  680     { CPU_ID_SA110,     CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
  681     { CPU_ID_SA1100,    CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
  682     { CPU_ID_SA1110,    CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
  683     { CPU_ID_IXP1200,   CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
  684     { 0, 0, 0, 0, 0, 0, 0, 0}
  685 };
  686 
  687 static void get_cachetype_table(void);
  688 
  689 static void
  690 get_cachetype_table()
  691 {
  692         int i;
  693         u_int32_t cpuid = cpufunc_id();
  694 
  695         for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
  696                 if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
  697                         arm_pcache_type = cachetab[i].ct_pcache_type;
  698                         arm_pcache_unified = cachetab[i].ct_pcache_unified;
  699                         arm_pdcache_size = cachetab[i].ct_pdcache_size;
  700                         arm_pdcache_line_size =
  701                             cachetab[i].ct_pdcache_line_size;
  702                         arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
  703                         arm_picache_size = cachetab[i].ct_picache_size;
  704                         arm_picache_line_size =
  705                             cachetab[i].ct_picache_line_size;
  706                         arm_picache_ways = cachetab[i].ct_picache_ways;
  707                 }
  708         }
  709         arm_dcache_align = arm_pdcache_line_size;
  710 
  711         arm_dcache_align_mask = arm_dcache_align - 1;
  712 }
  713 
  714 #endif /* SA110 || SA1100 || SA1110 || IXP12X0 */
  715 
  716 /*
  717  * Cannot panic here as we may not have a console yet ...
  718  */
  719 
  720 int
  721 set_cpufuncs()
  722 {
  723         cputype = cpufunc_id();
  724         cputype &= CPU_ID_CPU_MASK;
  725 
  726         /*
  727          * NOTE: cpu_do_powersave defaults to off.  If we encounter a
  728          * CPU type where we want to use it by default, then we set it.
  729          */
  730 
  731 #ifdef CPU_ARM7TDMI
  732         if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
  733             CPU_ID_IS7(cputype) &&
  734             (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
  735                 cpufuncs = arm7tdmi_cpufuncs;
  736                 cpu_reset_needs_v4_MMU_disable = 0;
  737                 get_cachetype_cp15();
  738                 pmap_pte_init_generic();
  739                 return 0;
  740         }
  741 #endif  
  742 #ifdef CPU_ARM8
  743         if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
  744             (cputype & 0x0000f000) == 0x00008000) {
  745                 cpufuncs = arm8_cpufuncs;
  746                 cpu_reset_needs_v4_MMU_disable = 0;     /* XXX correct? */
  747                 get_cachetype_cp15();
  748                 pmap_pte_init_arm8();
  749                 return 0;
  750         }
  751 #endif  /* CPU_ARM8 */
  752 #ifdef CPU_ARM9
  753         if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
  754              (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
  755             (cputype & 0x0000f000) == 0x00009000) {
  756                 cpufuncs = arm9_cpufuncs;
  757                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
  758                 get_cachetype_cp15();
  759                 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
  760                 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
  761                     arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
  762                 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
  763                 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
  764 #ifdef ARM9_CACHE_WRITE_THROUGH
  765                 pmap_pte_init_arm9();
  766 #else
  767                 pmap_pte_init_generic();
  768 #endif
  769                 return 0;
  770         }
  771 #endif /* CPU_ARM9 */
  772 #ifdef CPU_ARM10
  773         if (/* cputype == CPU_ID_ARM1020T || */
  774             cputype == CPU_ID_ARM1020E) {
  775                 /*
  776                  * Select write-through caching (this isn't really an
  777                  * option on ARM1020T).
  778                  */
  779                 cpufuncs = arm10_cpufuncs;
  780                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
  781                 get_cachetype_cp15();
  782                 arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
  783                 arm10_dcache_sets_max = 
  784                     (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
  785                     arm10_dcache_sets_inc;
  786                 arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
  787                 arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
  788                 pmap_pte_init_generic();
  789                 return 0;
  790         }
  791 #endif /* CPU_ARM10 */
  792 #ifdef CPU_SA110
  793         if (cputype == CPU_ID_SA110) {
  794                 cpufuncs = sa110_cpufuncs;
  795                 cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it */
  796                 get_cachetype_table();
  797                 pmap_pte_init_sa1();
  798                 return 0;
  799         }
  800 #endif  /* CPU_SA110 */
  801 #ifdef CPU_SA1100
  802         if (cputype == CPU_ID_SA1100) {
  803                 cpufuncs = sa11x0_cpufuncs;
  804                 cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it  */
  805                 get_cachetype_table();
  806                 pmap_pte_init_sa1();
  807                 /* Use powersave on this CPU. */
  808                 cpu_do_powersave = 1;
  809 
  810                 return 0;
  811         }
  812 #endif  /* CPU_SA1100 */
  813 #ifdef CPU_SA1110
  814         if (cputype == CPU_ID_SA1110) {
  815                 cpufuncs = sa11x0_cpufuncs;
  816                 cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it  */
  817                 get_cachetype_table();
  818                 pmap_pte_init_sa1();
  819                 /* Use powersave on this CPU. */
  820                 cpu_do_powersave = 1;
  821 
  822                 return 0;
  823         }
  824 #endif  /* CPU_SA1110 */
  825 #ifdef CPU_IXP12X0
  826         if (cputype == CPU_ID_IXP1200) {
  827                 cpufuncs = ixp12x0_cpufuncs;
  828                 cpu_reset_needs_v4_MMU_disable = 1;
  829                 get_cachetype_table();
  830                 pmap_pte_init_sa1();
  831                 return 0;
  832         }
  833 #endif  /* CPU_IXP12X0 */
  834 #ifdef CPU_XSCALE_80200
  835         if (cputype == CPU_ID_80200) {
  836                 int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
  837 
  838                 i80200_icu_init();
  839 
  840                 /*
  841                  * Reset the Performance Monitoring Unit to a
  842                  * pristine state:
  843                  *      - CCNT, PMN0, PMN1 reset to 0
  844                  *      - overflow indications cleared
  845                  *      - all counters disabled
  846                  */
  847                 __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
  848                         :
  849                         : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
  850                                PMNC_CC_IF));
  851 
  852 #if defined(XSCALE_CCLKCFG)
  853                 /*
  854                  * Crank CCLKCFG to maximum legal value.
  855                  */
  856                 __asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
  857                         :
  858                         : "r" (XSCALE_CCLKCFG));
  859 #endif
  860 
  861                 /*
  862                  * XXX Disable ECC in the Bus Controller Unit; we
  863                  * don't really support it, yet.  Clear any pending
  864                  * error indications.
  865                  */
  866                 __asm __volatile("mcr p13, 0, %0, c0, c1, 0"
  867                         :
  868                         : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
  869 
  870                 cpufuncs = xscale_cpufuncs;
  871 #if defined(PERFCTRS)
  872                 xscale_pmu_init();
  873 #endif
  874 
  875                 /*
  876                  * i80200 errata: Step-A0 and A1 have a bug where
  877                  * D$ dirty bits are not cleared on "invalidate by
  878                  * address".
  879                  *
  880                  * Workaround: Clean cache line before invalidating.
  881                  */
  882                 if (rev == 0 || rev == 1)
  883                         cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
  884 
  885                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
  886                 get_cachetype_cp15();
  887                 pmap_pte_init_xscale();
  888                 return 0;
  889         }
  890 #endif /* CPU_XSCALE_80200 */
  891 #ifdef CPU_XSCALE_80321
  892         if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
  893             cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0) {
  894 
  895                 /*
  896                  * Reset the Performance Monitoring Unit to a
  897                  * pristine state:
  898                  *      - CCNT, PMN0, PMN1 reset to 0
  899                  *      - overflow indications cleared
  900                  *      - all counters disabled
  901                  */
  902                 __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
  903                         :
  904                         : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
  905                                PMNC_CC_IF));
  906 
  907                 cpufuncs = xscale_cpufuncs;
  908 #if defined(PERFCTRS)
  909                 xscale_pmu_init();
  910 #endif
  911 
  912                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
  913                 get_cachetype_cp15();
  914                 pmap_pte_init_xscale();
  915                 return 0;
  916         }
  917 #endif /* CPU_XSCALE_80321 */
  918 #ifdef CPU_XSCALE_PXA2X0
  919         /* ignore core revision to test PXA2xx CPUs */
  920         if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
  921             (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
  922 
  923                 cpufuncs = xscale_cpufuncs;
  924 #if defined(PERFCTRS)
  925                 xscale_pmu_init();
  926 #endif
  927 
  928                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
  929                 get_cachetype_cp15();
  930                 pmap_pte_init_xscale();
  931 
  932                 /* Use powersave on this CPU. */
  933                 cpu_do_powersave = 1;
  934 
  935                 return 0;
  936         }
  937 #endif /* CPU_XSCALE_PXA2X0 */
  938 #ifdef CPU_XSCALE_IXP425
  939         if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
  940             cputype == CPU_ID_IXP425_266) {
  941                 cpufuncs = xscale_cpufuncs;
  942 #if defined(PERFCTRS)
  943                 xscale_pmu_init();
  944 #endif
  945 
  946                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
  947                 get_cachetype_cp15();
  948                 pmap_pte_init_xscale();
  949 
  950                 return 0;
  951         }
  952 #endif /* CPU_XSCALE_IXP425 */
  953         /*
  954          * Bzzzz. And the answer was ...
  955          */
  956         panic("No support for this CPU type (%08x) in kernel", cputype);
  957         return(ARCHITECTURE_NOT_PRESENT);
  958 }
  959 
  960 /*
  961  * Fixup routines for data and prefetch aborts.
  962  *
  963  * Several compile-time symbols are used:
  964  *
  965  * DEBUG_FAULT_CORRECTION - Print debugging information during the
  966  * correction of registers after a fault.
  967  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts; when this
  968  * symbol is defined, the late-abort model is used.
  969  */
  970 
  971 
  972 /*
  973  * Null abort fixup routine.
  974  * For use when no fixup is required.
  975  */
  976 int
  977 cpufunc_null_fixup(arg)
  978         void *arg;
  979 {
  980         return(ABORT_FIXUP_OK);
  981 }
  982 
  983 
  984 #if defined(CPU_ARM7TDMI)
  985 
  986 #ifdef DEBUG_FAULT_CORRECTION
  987 #define DFC_PRINTF(x)           printf x
  988 #define DFC_DISASSEMBLE(x)      disassemble(x)
  989 #else
  990 #define DFC_PRINTF(x)           /* nothing */
  991 #define DFC_DISASSEMBLE(x)      /* nothing */
  992 #endif
  993 
  994 /*
  995  * "Early" data abort fixup.
  996  *
  997  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
  998  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
  999  *
 1000  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
 1001  */
 1002 int
 1003 early_abort_fixup(arg)
 1004         void *arg;
 1005 {
 1006         trapframe_t *frame = arg;
 1007         u_int fault_pc;
 1008         u_int fault_instruction;
 1009         int saved_lr = 0;
 1010 
 1011         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
 1012 
 1013                 /* Ok an abort in SVC mode */
 1014 
 1015                 /*
 1016                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
 1017                  * as the fault happened in svc mode but we need it in the
 1018                  * usr slot so we can treat the registers as an array of ints
 1019                  * during fixing.
 1020                  * NOTE: This PC is in the position but writeback is not
 1021                  * allowed on r15.
 1022                  * Doing it like this is more efficient than trapping this
 1023                  * case in all possible locations in the following fixup code.
 1024                  */
 1025 
 1026                 saved_lr = frame->tf_usr_lr;
 1027                 frame->tf_usr_lr = frame->tf_svc_lr;
 1028 
 1029                 /*
 1030                  * Note the trapframe does not have the SVC r13 so a fault
 1031                  * from an instruction with writeback to r13 in SVC mode is
 1032                  * not allowed. This should not happen as the kstack is
 1033                  * always valid.
 1034                  */
 1035         }
 1036 
 1037         /* Get fault address and status from the CPU */
 1038 
 1039         fault_pc = frame->tf_pc;
 1040         fault_instruction = *((volatile unsigned int *)fault_pc);
 1041 
 1042         /* Decode the fault instruction and fix the registers as needed */
 1043 
 1044         if ((fault_instruction & 0x0e000000) == 0x08000000) {
 1045                 int base;
 1046                 int loop;
 1047                 int count;
 1048                 int *registers = &frame->tf_r0;
 1049         
 1050                 DFC_PRINTF(("LDM/STM\n"));
 1051                 DFC_DISASSEMBLE(fault_pc);
 1052                 if (fault_instruction & (1 << 21)) {
 1053                         DFC_PRINTF(("This instruction must be corrected\n"));
 1054                         base = (fault_instruction >> 16) & 0x0f;
 1055                         if (base == 15)
 1056                                 return ABORT_FIXUP_FAILED;
 1057                         /* Count registers transferred */
 1058                         count = 0;
 1059                         for (loop = 0; loop < 16; ++loop) {
 1060                                 if (fault_instruction & (1<<loop))
 1061                                         ++count;
 1062                         }
 1063                         DFC_PRINTF(("%d registers used\n", count));
 1064                         DFC_PRINTF(("Corrected r%d by %d bytes ",
 1065                                        base, count * 4));
 1066                         if (fault_instruction & (1 << 23)) {
 1067                                 DFC_PRINTF(("down\n"));
 1068                                 registers[base] -= count * 4;
 1069                         } else {
 1070                                 DFC_PRINTF(("up\n"));
 1071                                 registers[base] += count * 4;
 1072                         }
 1073                 }
 1074         } else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
 1075                 int base;
 1076                 int offset;
 1077                 int *registers = &frame->tf_r0;
 1078         
 1079                 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
 1080 
 1081                 DFC_DISASSEMBLE(fault_pc);
 1082 
 1083                 /* Only need to fix registers if write back is turned on */
 1084 
 1085                 if ((fault_instruction & (1 << 21)) != 0) {
 1086                         base = (fault_instruction >> 16) & 0x0f;
 1087                         if (base == 13 &&
 1088                             (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
 1089                                 return ABORT_FIXUP_FAILED;
 1090                         if (base == 15)
 1091                                 return ABORT_FIXUP_FAILED;
 1092 
 1093                         offset = (fault_instruction & 0xff) << 2;
 1094                         DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
 1095                         if ((fault_instruction & (1 << 23)) != 0)
 1096                                 offset = -offset;
 1097                         registers[base] += offset;
 1098                         DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
 1099                 }
 1100         } else if ((fault_instruction & 0x0e000000) == 0x0c000000)      /* XXX unreachable: same test as the branch above */
 1101                 return ABORT_FIXUP_FAILED;
 1102 
 1103         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
 1104 
 1105                 /* Ok an abort in SVC mode */
 1106 
 1107                 /*
 1108                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
 1109                  * as the fault happened in svc mode but we need it in the
 1110                  * usr slot so we can treat the registers as an array of ints
 1111                  * during fixing.
 1112                  * NOTE: This PC is in the position but writeback is not
 1113                  * allowed on r15.
 1114                  * Doing it like this is more efficient than trapping this
 1115                  * case in all possible locations in the prior fixup code.
 1116                  */
 1117 
 1118                 frame->tf_svc_lr = frame->tf_usr_lr;
 1119                 frame->tf_usr_lr = saved_lr;
 1120 
 1121                 /*
 1122                  * Note the trapframe does not have the SVC r13 so a fault
 1123                  * from an instruction with writeback to r13 in SVC mode is
 1124                  * not allowed. This should not happen as the kstack is
 1125                  * always valid.
 1126                  */
 1127         }
 1128 
 1129         return(ABORT_FIXUP_OK);
 1130 }
 1131 #endif  /* CPU_ARM7TDMI */
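
To see the LDM/STM correction above with concrete numbers, the following stand-alone fragment (an illustration only, not part of cpufunc.c) applies the same bit tests to one faulting instruction, 0xE8B4000F, which is ldmia r4!, {r0-r3}:

#include <stdio.h>

int
main(void)
{
	unsigned int insn = 0xE8B4000F;			/* ldmia r4!, {r0-r3} */
	int base, loop, count = 0;

	if ((insn & 0x0e000000) == 0x08000000 &&	/* block transfer */
	    (insn & (1 << 21)) != 0) {			/* base writeback */
		base = (insn >> 16) & 0x0f;		/* Rn = r4 */
		for (loop = 0; loop < 16; ++loop)
			if (insn & (1 << loop))
				++count;		/* r0-r3 -> count = 4 */
		/*
		 * With the U bit (23) set, the aborted LDM had already
		 * stepped r4 up by count * 4 bytes, so the fixup steps
		 * it back down; otherwise it would step it back up.
		 */
		if (insn & (1 << 23))
			printf("r%d corrected by -%d bytes\n", base, count * 4);
		else
			printf("r%d corrected by +%d bytes\n", base, count * 4);
	}
	return (0);
}

Run on its own, this prints "r4 corrected by -16 bytes", which is exactly the adjustment early_abort_fixup() applies to the trapframe.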
 1132 
 1133 
 1134 #if defined(CPU_ARM7TDMI)
 1135 /*
 1136  * "Late" (base updated) data abort fixup
 1137  *
 1138  * For ARM6 (in late-abort mode) and ARM7.
 1139  *
 1140  * In this model, all data-transfer instructions need fixing up.  We defer
 1141  * LDM, STM, LDC and STC fixup to the early-abort handler.
 1142  */
 1143 int
 1144 late_abort_fixup(arg)
 1145         void *arg;
 1146 {
 1147         trapframe_t *frame = arg;
 1148         u_int fault_pc;
 1149         u_int fault_instruction;
 1150         int saved_lr = 0;
 1151 
 1152         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
 1153 
 1154                 /* Ok an abort in SVC mode */
 1155 
 1156                 /*
 1157                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
 1158                  * as the fault happened in svc mode but we need it in the
 1159                  * usr slot so we can treat the registers as an array of ints
 1160                  * during fixing.
 1161                  * NOTE: This PC is in the position but writeback is not
 1162                  * allowed on r15.
 1163                  * Doing it like this is more efficient than trapping this
 1164                  * case in all possible locations in the following fixup code.
 1165                  */
 1166 
 1167                 saved_lr = frame->tf_usr_lr;
 1168                 frame->tf_usr_lr = frame->tf_svc_lr;
 1169 
 1170                 /*
 1171                  * Note the trapframe does not have the SVC r13 so a fault
 1172                  * from an instruction with writeback to r13 in SVC mode is
 1173                  * not allowed. This should not happen as the kstack is
 1174                  * always valid.
 1175                  */
 1176         }
 1177 
 1178         /* Get fault address and status from the CPU */
 1179 
 1180         fault_pc = frame->tf_pc;
 1181         fault_instruction = *((volatile unsigned int *)fault_pc);
 1182 
 1183         /* Decode the fault instruction and fix the registers as needed */
 1184 
 1185         /* Was it a swap instruction? */
 1186 
 1187         if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
 1188                 DFC_DISASSEMBLE(fault_pc);
 1189         } else if ((fault_instruction & 0x0c000000) == 0x04000000) {
 1190 
 1191                 /* Was it an ldr/str instruction? */
 1192                 /* This is for late abort only */
 1193 
 1194                 int base;
 1195                 int offset;
 1196                 int *registers = &frame->tf_r0;
 1197 
 1198                 DFC_DISASSEMBLE(fault_pc);
 1199                 
 1200                 /* This is for late abort only */
 1201 
 1202                 if ((fault_instruction & (1 << 24)) == 0
 1203                     || (fault_instruction & (1 << 21)) != 0) {  
 1204                         /* post-indexed ldr/str, or pre-indexed with writeback */
 1205 
 1206                         base = (fault_instruction >> 16) & 0x0f;
 1207                         if (base == 13 &&
 1208                             (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
 1209                                 return ABORT_FIXUP_FAILED;
 1210                         if (base == 15)
 1211                                 return ABORT_FIXUP_FAILED;
 1212                         DFC_PRINTF(("late abt fix: r%d=%08x : ",
 1213                                        base, registers[base]));
 1214                         if ((fault_instruction & (1 << 25)) == 0) {
 1215                                 /* Immediate offset - easy */
 1216 
 1217                                 offset = fault_instruction & 0xfff;
 1218                                 if ((fault_instruction & (1 << 23)))
 1219                                         offset = -offset;
 1220                                 registers[base] += offset;
 1221                                 DFC_PRINTF(("imm=%08x ", offset));
 1222                         } else {
 1223                                 /* offset is a shifted register */
 1224                                 int shift;
 1225 
 1226                                 offset = fault_instruction & 0x0f;
 1227                                 if (offset == base)
 1228                                         return ABORT_FIXUP_FAILED;
 1229                 
 1230                                 /*
 1231                                  * Register offset - harder: we have to
 1232                                  * cope with shifts!
 1233                                  */
 1234                                 offset = registers[offset];
 1235 
 1236                                 if ((fault_instruction & (1 << 4)) == 0)
 1237                                         /* shift with amount */
 1238                                         shift = (fault_instruction >> 7) & 0x1f;
 1239                                 else {
 1240                                         /* shift with register */
 1241                                         if ((fault_instruction & (1 << 7)) != 0)
 1242                                                 /* undefined for now so bail out */
 1243                                                 return ABORT_FIXUP_FAILED;
 1244                                         shift = ((fault_instruction >> 8) & 0xf);
 1245                                         if (base == shift)
 1246                                                 return ABORT_FIXUP_FAILED;
 1247                                         DFC_PRINTF(("shift reg=%d ", shift));
 1248                                         shift = registers[shift];
 1249                                 }
 1250                                 DFC_PRINTF(("shift=%08x ", shift));
 1251                                 switch (((fault_instruction >> 5) & 0x3)) {
 1252                                 case 0 : /* Logical left */
 1253                                         offset = (int)(((u_int)offset) << shift);
 1254                                         break;
 1255                                 case 1 : /* Logical Right */
 1256                                         if (shift == 0) shift = 32;
 1257                                         offset = (int)(((u_int)offset) >> shift);
 1258                                         break;
 1259                                 case 2 : /* Arithmetic Right */
 1260                                         if (shift == 0) shift = 32;
 1261                                         offset = (int)(((int)offset) >> shift);
 1262                                         break;
 1263                                 case 3 : /* Rotate right (rol or rxx) */
 1264                                         return ABORT_FIXUP_FAILED;
 1265                                         break;
 1266                                 }
 1267 
 1268                                 DFC_PRINTF(("abt: fixed LDR/STR with "
 1269                                                "register offset\n"));
 1270                                 if ((fault_instruction & (1 << 23)))
 1271                                         offset = -offset;
 1272                                 DFC_PRINTF(("offset=%08x ", offset));
 1273                                 registers[base] += offset;
 1274                         }
 1275                         DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
 1276                 }
 1277         }
 1278 
 1279         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
 1280 
 1281                 /* Ok an abort in SVC mode */
 1282 
 1283                 /*
 1284                  * Copy the SVC r14 into the usr r14 - the usr r14 is
 1285                  * garbage, as the fault happened in SVC mode, but we
 1286                  * need it in the usr slot so we can treat the registers
 1287                  * as an array of ints during fixing.
 1288                  * NOTE: The PC is already in its slot; writeback to r15
 1289                  * is not allowed in any case.
 1290                  * Doing it this way is more efficient than trapping the
 1291                  * case at every possible location in the fixup code above.
 1292                  */
 1293 
 1294                 frame->tf_svc_lr = frame->tf_usr_lr;
 1295                 frame->tf_usr_lr = saved_lr;
 1296 
 1297                 /*
 1298                  * Note the trapframe does not have the SVC r13 so a fault
 1299                  * from an instruction with writeback to r13 in SVC mode is
 1300                  * not allowed. This should not happen as the kstack is
 1301                  * always valid.
 1302                  */
 1303         }
 1304 
 1305         /*
 1306          * Now let the early-abort fixup routine have a go, in case it
 1307          * was an LDM, STM, LDC or STC that faulted.
 1308          */
 1309 
 1310         return early_abort_fixup(arg);
 1311 }
 1312 #endif  /* CPU_ARM7TDMI */
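
The register-offset branch of the late-abort fixup above decodes the shift field of the faulting LDR/STR by hand. The following standalone sketch (illustrative only, not part of cpufunc.c) reproduces that offset computation in plain userland C so it can be tested in isolation. The instruction field positions are taken from the code above; the helper name abort_fixup_correction() and the sample encoding are assumptions made for the example, and the shift-by-register and ROR/RRX forms are skipped, just as the fixup above bails out on cases it cannot handle.

#include <stdint.h>
#include <stdio.h>

/*
 * Compute the correction the late-abort fixup would add back to the base
 * register of a faulting single-word LDR/STR: the decoded offset, negated
 * when the U bit (bit 23) indicated an add.  Forms the fixup above does
 * not handle (register-specified shifts, ROR/RRX) return 0 here.
 */
static int
abort_fixup_correction(uint32_t insn, const uint32_t regs[16])
{
	int offset, shift;

	if ((insn & (1 << 25)) == 0) {
		/* Immediate offset: low 12 bits. */
		offset = insn & 0xfff;
	} else {
		/* Register offset, restricted here to immediate shifts. */
		if (insn & (1 << 4))
			return 0;		/* shift amount in a register: skipped */
		offset = regs[insn & 0x0f];
		shift = (insn >> 7) & 0x1f;
		switch ((insn >> 5) & 0x3) {
		case 0:				/* LSL */
			offset = (int)((uint32_t)offset << shift);
			break;
		case 1:				/* LSR; #0 encodes a 32-bit shift */
			offset = (shift == 0) ? 0 :
			    (int)((uint32_t)offset >> shift);
			break;
		case 2:				/* ASR; #0 encodes a 32-bit shift */
			offset = (shift == 0) ? (offset < 0 ? -1 : 0) :
			    ((int)offset >> shift);
			break;
		default:			/* ROR/RRX: not handled */
			return 0;
		}
	}
	/* U bit set means the offset was added, so undoing it subtracts. */
	if (insn & (1 << 23))
		offset = -offset;
	return offset;
}

int
main(void)
{
	uint32_t regs[16] = { 0 };

	regs[2] = 4;
	/* ldr r0, [r1, r2, lsl #2]! : offset 16 was added, correction is -16 */
	printf("correction = %d\n", abort_fixup_correction(0xe7b10102, regs));
	return 0;
}
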
 1313 
 1314 /*
 1315  * CPU Setup code
 1316  */
 1317 
 1318 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
 1319     defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
 1320         defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
 1321         defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
 1322 
 1323 #define IGN     0
 1324 #define OR      1
 1325 #define BIC     2
 1326 
 1327 struct cpu_option {
 1328         char    *co_name;
 1329         int     co_falseop;
 1330         int     co_trueop;
 1331         int     co_value;
 1332 };
 1333 
 1334 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
 1335 
 1336 static u_int
 1337 parse_cpu_options(args, optlist, cpuctrl)
 1338         char *args;
 1339         struct cpu_option *optlist;    
 1340         u_int cpuctrl; 
 1341 {
 1342         int integer;
 1343 
 1344         if (args == NULL)
 1345                 return(cpuctrl);
 1346 
 1347         while (optlist->co_name) {
 1348                 if (get_bootconf_option(args, optlist->co_name,
 1349                     BOOTOPT_TYPE_BOOLEAN, &integer)) {
 1350                         if (integer) {
 1351                                 if (optlist->co_trueop == OR)
 1352                                         cpuctrl |= optlist->co_value;
 1353                                 else if (optlist->co_trueop == BIC)
 1354                                         cpuctrl &= ~optlist->co_value;
 1355                         } else {
 1356                                 if (optlist->co_falseop == OR)
 1357                                         cpuctrl |= optlist->co_value;
 1358                                 else if (optlist->co_falseop == BIC)
 1359                                         cpuctrl &= ~optlist->co_value;
 1360                         }
 1361                 }
 1362                 ++optlist;
 1363         }
 1364         return(cpuctrl);
 1365 }
 1366 #endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_ARM9 || CPU_SA110 || CPU_SA1100 || CPU_SA1110 || CPU_XSCALE_* */
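
As a rough, self-contained illustration of how an option table with IGN/OR/BIC actions edits a control word, the sketch below mirrors the loop in parse_cpu_options() above. It is not kernel code: find_bool_option() is a hypothetical stand-in for get_bootconf_option(), the CTRL_* constants stand in for the real CPU_CONTROL_* bits, and the string matching is deliberately naive.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define IGN	0
#define OR	1
#define BIC	2

#define CTRL_CACHE	0x0004		/* stand-in for CPU_CONTROL_IDC_ENABLE */
#define CTRL_WBUF	0x0008		/* stand-in for CPU_CONTROL_WBUF_ENABLE */

struct opt {
	const char	*name;
	int		falseop;
	int		trueop;
	unsigned	value;
};

/*
 * Hypothetical stand-in for get_bootconf_option(): look for "name=<n>"
 * in a boot argument string and return its integer value.
 */
static int
find_bool_option(const char *args, const char *name, int *val)
{
	const char *p = strstr(args, name);

	if (p == NULL || p[strlen(name)] != '=')
		return 0;
	*val = atoi(p + strlen(name) + 1);
	return 1;
}

/* Same shape as parse_cpu_options(): walk the table, apply OR/BIC/IGN. */
static unsigned
apply_options(const char *args, const struct opt *o, unsigned ctrl)
{
	int v, op;

	for (; o->name != NULL; o++) {
		if (!find_bool_option(args, o->name, &v))
			continue;
		op = v ? o->trueop : o->falseop;
		if (op == OR)
			ctrl |= o->value;
		else if (op == BIC)
			ctrl &= ~o->value;
	}
	return ctrl;
}

int
main(void)
{
	static const struct opt opts[] = {
		{ "cpu.cache",		BIC, OR,  CTRL_CACHE },
		{ "cpu.nocache",	OR,  BIC, CTRL_CACHE },
		{ NULL,			IGN, IGN, 0 }
	};
	unsigned ctrl = CTRL_CACHE | CTRL_WBUF;

	/* "cpu.nocache=1" clears the cache bit but leaves the write buffer. */
	ctrl = apply_options("cpu.nocache=1", opts, ctrl);
	printf("ctrl = %#x\n", ctrl);	/* prints 0x8 */
	return 0;
}
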
 1367 
 1368 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
 1369 struct cpu_option arm678_options[] = {
 1370 #ifdef COMPAT_12
 1371         { "nocache",            IGN, BIC, CPU_CONTROL_IDC_ENABLE },
 1372         { "nowritebuf",         IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
 1373 #endif  /* COMPAT_12 */
 1374         { "cpu.cache",          BIC, OR,  CPU_CONTROL_IDC_ENABLE },
 1375         { "cpu.nocache",        OR,  BIC, CPU_CONTROL_IDC_ENABLE },
 1376         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1377         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 1378         { NULL,                 IGN, IGN, 0 }
 1379 };
 1380 
 1381 #endif  /* CPU_ARM7TDMI || CPU_ARM8 */
 1382 
 1383 #ifdef CPU_ARM7TDMI
 1384 struct cpu_option arm7tdmi_options[] = {
 1385         { "arm7.cache",         BIC, OR,  CPU_CONTROL_IDC_ENABLE },
 1386         { "arm7.nocache",       OR,  BIC, CPU_CONTROL_IDC_ENABLE },
 1387         { "arm7.writebuf",      BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1388         { "arm7.nowritebuf",    OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 1389 #ifdef COMPAT_12
 1390         { "fpaclk2",            BIC, OR,  CPU_CONTROL_CPCLK },
 1391 #endif  /* COMPAT_12 */
 1392         { "arm700.fpaclk",      BIC, OR,  CPU_CONTROL_CPCLK },
 1393         { NULL,                 IGN, IGN, 0 }
 1394 };
 1395 
 1396 void
 1397 arm7tdmi_setup(args)
 1398         char *args;
 1399 {
 1400         int cpuctrl;
 1401 
 1402         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1403                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1404                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
 1405 
 1406         cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
 1407         cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
 1408 
 1409 #ifdef __ARMEB__
 1410         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 1411 #endif
 1412 
 1413         /* Clear out the cache */
 1414         cpu_idcache_wbinv_all();
 1415 
 1416         /* Set the control register */
 1417         ctrl = cpuctrl;
 1418         cpu_control(0xffffffff, cpuctrl);
 1419 }
 1420 #endif  /* CPU_ARM7TDMI */
 1421 
 1422 #ifdef CPU_ARM8
 1423 struct cpu_option arm8_options[] = {
 1424         { "arm8.cache",         BIC, OR,  CPU_CONTROL_IDC_ENABLE },
 1425         { "arm8.nocache",       OR,  BIC, CPU_CONTROL_IDC_ENABLE },
 1426         { "arm8.writebuf",      BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1427         { "arm8.nowritebuf",    OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 1428 #ifdef COMPAT_12
 1429         { "branchpredict",      BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 1430 #endif  /* COMPAT_12 */
 1431         { "cpu.branchpredict",  BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 1432         { "arm8.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 1433         { NULL,                 IGN, IGN, 0 }
 1434 };
 1435 
 1436 void
 1437 arm8_setup(args)
 1438         char *args;
 1439 {
 1440         int integer;
 1441         int cpuctrl, cpuctrlmask;
 1442         int clocktest;
 1443         int setclock = 0;
 1444 
 1445         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1446                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1447                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
 1448         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1449                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1450                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
 1451                  | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
 1452                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
 1453 
 1454 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 1455         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 1456 #endif
 1457 
 1458         cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
 1459         cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
 1460 
 1461 #ifdef __ARMEB__
 1462         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 1463 #endif
 1464 
 1465         /* Get clock configuration */
 1466         clocktest = arm8_clock_config(0, 0) & 0x0f;
 1467 
 1468         /* Special ARM8 clock and test configuration */
 1469         if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
 1470                 clocktest = 0;
 1471                 setclock = 1;
 1472         }
 1473         if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
 1474                 if (integer)
 1475                         clocktest |= 0x01;
 1476                 else
 1477                         clocktest &= ~(0x01);
 1478                 setclock = 1;
 1479         }
 1480         if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
 1481                 if (integer)
 1482                         clocktest |= 0x02;
 1483                 else
 1484                         clocktest &= ~(0x02);
 1485                 setclock = 1;
 1486         }
 1487         if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
 1488                 clocktest = (clocktest & ~0x0c) | (integer & 3) << 2;
 1489                 setclock = 1;
 1490         }
 1491         if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
 1492                 clocktest |= (integer & 7) << 5;
 1493                 setclock = 1;
 1494         }
 1495         
 1496         /* Clear out the cache */
 1497         cpu_idcache_wbinv_all();
 1498 
 1499         /* Set the control register */
 1500         ctrl = cpuctrl;
 1501         cpu_control(0xffffffff, cpuctrl);
 1502 
 1503         /* Set the clock/test register */    
 1504         if (setclock)
 1505                 arm8_clock_config(0x7f, clocktest);
 1506 }
 1507 #endif  /* CPU_ARM8 */
 1508 
 1509 #ifdef CPU_ARM9
 1510 struct cpu_option arm9_options[] = {
 1511         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1512         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1513         { "arm9.cache",         BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1514         { "arm9.icache",        BIC, OR,  CPU_CONTROL_IC_ENABLE },
 1515         { "arm9.dcache",        BIC, OR,  CPU_CONTROL_DC_ENABLE },
 1516         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1517         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 1518         { "arm9.writebuf",      BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1519         { NULL,                 IGN, IGN, 0 }
 1520 };
 1521 
 1522 void
 1523 arm9_setup(args)
 1524         char *args;
 1525 {
 1526         int cpuctrl, cpuctrlmask;
 1527 
 1528         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1529             | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1530             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1531             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
 1532             | CPU_CONTROL_ROUNDROBIN;
 1533         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1534                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1535                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1536                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 1537                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 1538                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
 1539                  | CPU_CONTROL_ROUNDROBIN;
 1540 
 1541 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 1542         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 1543 #endif
 1544 
 1545         cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
 1546 
 1547 #ifdef __ARMEB__
 1548         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 1549 #endif
 1550         if (vector_page == ARM_VECTORS_HIGH)
 1551                 cpuctrl |= CPU_CONTROL_VECRELOC;
 1552 
 1553         /* Clear out the cache */
 1554         cpu_idcache_wbinv_all();
 1555 
 1556         /* Set the control register */
 1557         cpu_control(cpuctrlmask, cpuctrl);
 1558         ctrl = cpuctrl;
 1559 
 1560 }
 1561 #endif  /* CPU_ARM9 */
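
The ARM9 path above passes cpuctrlmask to cpu_control(), while most of the other setup routines pass 0xffffffff; either way the call is, in effect, a masked read-modify-write of the CP15 control register. The toy model below sketches that contract (clear the bits named in the mask, then set the requested bits) under the assumption that this is how cpu_control(clear, set) behaves; the real routine is implemented in assembly elsewhere and may differ in detail, and model_cpu_control() and fake_cp15_ctrl are names invented for the example.

#include <stdint.h>
#include <stdio.h>

/* Pretend CP15 register 1 (the control register) is just a variable. */
static uint32_t fake_cp15_ctrl = 0x0000007d;	/* arbitrary starting value */

/*
 * Toy model of cpu_control(clear, set): drop every bit in 'clear', then
 * OR in 'set', and return the previous value.  Callers that pass
 * clear = 0xffffffff simply replace the whole register.
 */
static uint32_t
model_cpu_control(uint32_t clear, uint32_t set)
{
	uint32_t old = fake_cp15_ctrl;

	fake_cp15_ctrl = (old & ~clear) | set;
	return old;
}

int
main(void)
{
	/* Masked update: only the bits named in the mask can change. */
	model_cpu_control(0x00000005, 0x00000004);
	printf("masked   -> %#010x\n", fake_cp15_ctrl);	/* 0x0000007c */

	/* Full replacement, as in arm7tdmi_setup() and friends. */
	model_cpu_control(0xffffffff, 0x0000107d);
	printf("replaced -> %#010x\n", fake_cp15_ctrl);	/* 0x0000107d */
	return 0;
}
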
 1562 
 1563 #ifdef CPU_ARM10
 1564 struct cpu_option arm10_options[] = {
 1565         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1566         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1567         { "arm10.cache",        BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1568         { "arm10.icache",       BIC, OR,  CPU_CONTROL_IC_ENABLE },
 1569         { "arm10.dcache",       BIC, OR,  CPU_CONTROL_DC_ENABLE },
 1570         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1571         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 1572         { "arm10.writebuf",     BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1573         { NULL,                 IGN, IGN, 0 }
 1574 };
 1575 
 1576 void
 1577 arm10_setup(args)
 1578         char *args;
 1579 {
 1580         int cpuctrl, cpuctrlmask;
 1581 
 1582         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
 1583             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 
 1584             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
 1585         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
 1586             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1587             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 1588             | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 1589             | CPU_CONTROL_BPRD_ENABLE
 1590             | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
 1591 
 1592 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 1593         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 1594 #endif
 1595 
 1596         cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
 1597 
 1598 #ifdef __ARMEB__
 1599         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 1600 #endif
 1601 
 1602         /* Clear out the cache */
 1603         cpu_idcache_wbinv_all();
 1604 
 1605         /* Now really make sure they are clean.  */
 1606         asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
 1607 
 1608         /* Set the control register */
 1609         ctrl = cpuctrl;
 1610         cpu_control(0xffffffff, cpuctrl);
 1611 
 1612         /* And again. */
 1613         cpu_idcache_wbinv_all();
 1614 }
 1615 #endif  /* CPU_ARM10 */
 1616 
 1617 #ifdef CPU_SA110
 1618 struct cpu_option sa110_options[] = {
 1619 #ifdef COMPAT_12
 1620         { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1621         { "nowritebuf",         IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
 1622 #endif  /* COMPAT_12 */
 1623         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1624         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1625         { "sa110.cache",        BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1626         { "sa110.icache",       BIC, OR,  CPU_CONTROL_IC_ENABLE },
 1627         { "sa110.dcache",       BIC, OR,  CPU_CONTROL_DC_ENABLE },
 1628         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1629         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 1630         { "sa110.writebuf",     BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1631         { NULL,                 IGN, IGN, 0 }
 1632 };
 1633 
 1634 void
 1635 sa110_setup(args)
 1636         char *args;
 1637 {
 1638         int cpuctrl, cpuctrlmask;
 1639 
 1640         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1641                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1642                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1643                  | CPU_CONTROL_WBUF_ENABLE;
 1644         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1645                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1646                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1647                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 1648                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 1649                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
 1650                  | CPU_CONTROL_CPCLK;
 1651 
 1652 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 1653         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 1654 #endif
 1655 
 1656         cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
 1657 
 1658 #ifdef __ARMEB__
 1659         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 1660 #endif
 1661 
 1662         /* Clear out the cache */
 1663         cpu_idcache_wbinv_all();
 1664 
 1665         /* Set the control register */
 1666         ctrl = cpuctrl;
 1667 /*      cpu_control(cpuctrlmask, cpuctrl);*/
 1668         cpu_control(0xffffffff, cpuctrl);
 1669 
 1670         /*
 1671          * Enable clock switching.  Note that this neither reads nor
 1672          * writes r0; r0 is only named to make the asm statement valid.
 1673          */
 1674         __asm ("mcr 15, 0, r0, c15, c1, 2");
 1675 }
 1676 #endif  /* CPU_SA110 */
 1677 
 1678 #if defined(CPU_SA1100) || defined(CPU_SA1110)
 1679 struct cpu_option sa11x0_options[] = {
 1680 #ifdef COMPAT_12
 1681         { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1682         { "nowritebuf",         IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
 1683 #endif  /* COMPAT_12 */
 1684         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1685         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1686         { "sa11x0.cache",       BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1687         { "sa11x0.icache",      BIC, OR,  CPU_CONTROL_IC_ENABLE },
 1688         { "sa11x0.dcache",      BIC, OR,  CPU_CONTROL_DC_ENABLE },
 1689         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1690         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 1691         { "sa11x0.writebuf",    BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1692         { NULL,                 IGN, IGN, 0 }
 1693 };
 1694 
 1695 void
 1696 sa11x0_setup(args)
 1697         char *args;
 1698 {
 1699         int cpuctrl, cpuctrlmask;
 1700 
 1701         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1702                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1703                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1704                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
 1705         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1706                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1707                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1708                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 1709                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 1710                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
 1711                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
 1712 
 1713 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 1714         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 1715 #endif
 1716 
 1717 
 1718         cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
 1719 
 1720 #ifdef __ARMEB__
 1721         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 1722 #endif
 1723 
 1724         if (vector_page == ARM_VECTORS_HIGH)
 1725                 cpuctrl |= CPU_CONTROL_VECRELOC;
 1726         /* Clear out the cache */
 1727         cpu_idcache_wbinv_all();
 1728         /* Set the control register */    
 1729         ctrl = cpuctrl;
 1730         cpu_control(0xffffffff, cpuctrl);
 1731 }
 1732 #endif  /* CPU_SA1100 || CPU_SA1110 */
 1733 
 1734 #if defined(CPU_IXP12X0)
 1735 struct cpu_option ixp12x0_options[] = {
 1736         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1737         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1738         { "ixp12x0.cache",      BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1739         { "ixp12x0.icache",     BIC, OR,  CPU_CONTROL_IC_ENABLE },
 1740         { "ixp12x0.dcache",     BIC, OR,  CPU_CONTROL_DC_ENABLE },
 1741         { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1742         { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
 1743         { "ixp12x0.writebuf",   BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
 1744         { NULL,                 IGN, IGN, 0 }
 1745 };
 1746 
 1747 void
 1748 ixp12x0_setup(args)
 1749         char *args;
 1750 {
 1751         int cpuctrl, cpuctrlmask;
 1752 
 1753 
 1754         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
 1755                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
 1756                  | CPU_CONTROL_IC_ENABLE;
 1757 
 1758         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
 1759                  | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
 1760                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
 1761                  | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
 1762                  | CPU_CONTROL_VECRELOC;
 1763 
 1764 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 1765         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 1766 #endif
 1767 
 1768         cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
 1769 
 1770 #ifdef __ARMEB__
 1771         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 1772 #endif
 1773 
 1774         if (vector_page == ARM_VECTORS_HIGH)
 1775                 cpuctrl |= CPU_CONTROL_VECRELOC;
 1776 
 1777         /* Clear out the cache */
 1778         cpu_idcache_wbinv_all();
 1779 
 1780         /* Set the control register */    
 1781         ctrl = cpuctrl;
 1782         /* cpu_control(0xffffffff, cpuctrl); */
 1783         cpu_control(cpuctrlmask, cpuctrl);
 1784 }
 1785 #endif /* CPU_IXP12X0 */
 1786 
 1787 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
 1788     defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
 1789 struct cpu_option xscale_options[] = {
 1790 #ifdef COMPAT_12
 1791         { "branchpredict",      BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 1792         { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1793 #endif  /* COMPAT_12 */
 1794         { "cpu.branchpredict",  BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 1795         { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1796         { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1797         { "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
 1798         { "xscale.cache",       BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
 1799         { "xscale.icache",      BIC, OR,  CPU_CONTROL_IC_ENABLE },
 1800         { "xscale.dcache",      BIC, OR,  CPU_CONTROL_DC_ENABLE },
 1801         { NULL,                 IGN, IGN, 0 }
 1802 };
 1803 
 1804 void
 1805 xscale_setup(args)
 1806         char *args;
 1807 {
 1808         uint32_t auxctl;
 1809         int cpuctrl, cpuctrlmask;
 1810 
 1811         /*
 1812          * The XScale Write Buffer is always enabled.  Our option
 1813          * is to enable/disable coalescing.  Note that bits 6:3
 1814          * must always be enabled.
 1815          */
 1816 
 1817         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1818                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1819                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1820                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
 1821                  | CPU_CONTROL_BPRD_ENABLE;
 1822         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
 1823                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
 1824                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 1825                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
 1826                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
 1827                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
 1828                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
 1829 
 1830 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
 1831         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
 1832 #endif
 1833 
 1834         cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
 1835 
 1836 #ifdef __ARMEB__
 1837         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
 1838 #endif
 1839 
 1840         if (vector_page == ARM_VECTORS_HIGH)
 1841                 cpuctrl |= CPU_CONTROL_VECRELOC;
 1842 
 1843         /* Clear out the cache */
 1844         cpu_idcache_wbinv_all();
 1845 
 1846         /*
 1847          * Set the control register.  Note that bits 6:3 must always
 1848          * be set to 1.
 1849          */
 1850         ctrl = cpuctrl;
 1851 /*      cpu_control(cpuctrlmask, cpuctrl);*/
 1852         cpu_control(0xffffffff, cpuctrl);
 1853 
 1854         /* Configure write coalescing (disabled if XSCALE_NO_COALESCE_WRITES) */
 1855         __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
 1856                 : "=r" (auxctl));
 1857 #ifdef XSCALE_NO_COALESCE_WRITES
 1858         auxctl |= XSCALE_AUXCTL_K;
 1859 #else
 1860         auxctl &= ~XSCALE_AUXCTL_K;
 1861 #endif
 1862         __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
 1863                 : : "r" (auxctl));
 1864 }
 1865 #endif  /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
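
xscale_setup() finishes with a read-modify-write of the XScale auxiliary control register so that only the write-buffer coalescing bit changes. The fragment below is an illustrative model of that pattern with the coprocessor access replaced by a plain variable; AUXCTL_K_BIT is a stand-in value (the real XSCALE_AUXCTL_K constant lives in the kernel headers), fake_auxctl is invented for the example, and the kernel's XSCALE_NO_COALESCE_WRITES option is mirrored by an ordinary #ifdef.

#include <stdint.h>
#include <stdio.h>

#define AUXCTL_K_BIT	0x00000001	/* stand-in for XSCALE_AUXCTL_K */

/* Pretend auxiliary control register (CP15 c1, c0, 1 on XScale). */
static uint32_t fake_auxctl = 0x00000033;

int
main(void)
{
	uint32_t auxctl;

	/* Read-modify-write, touching nothing but the K (coalescing) bit. */
	auxctl = fake_auxctl;			/* stands in for the mrc */
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= AUXCTL_K_BIT;			/* disable coalescing */
#else
	auxctl &= ~AUXCTL_K_BIT;		/* allow coalescing */
#endif
	fake_auxctl = auxctl;			/* stands in for the mcr */

	printf("auxctl = %#010x\n", fake_auxctl);	/* 0x32 by default */
	return 0;
}
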
