FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/initcpu.c


    1 /*-
    2  * Copyright (c) KATO Takenori, 1997, 1998.
    3  * 
    4  * All rights reserved.  Unpublished rights reserved under the copyright
    5  * laws of Japan.
    6  * 
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer as
   13  *    the first lines of this file unmodified.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 
   18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/6.3/sys/i386/i386/initcpu.c 173886 2007-11-24 19:45:58Z cvs2svn $");
   32 
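      /*
       * Model-specific CPU setup for i386.  initializecpu() is called during
       * machine-dependent startup and dispatches on the CPU class identified
       * earlier: cache and configuration-register tuning for i486-class Cyrix
       * and IBM parts, Pentium Pro/Celeron and VIA C3/C7 feature enabling,
       * SSE setup, and optional AMD K5/K6 write-allocate support.
       */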
   33 #include "opt_cpu.h"
   34 
   35 #include <sys/param.h>
   36 #include <sys/kernel.h>
   37 #include <sys/systm.h>
   38 #include <sys/sysctl.h>
   39 
   40 #include <machine/cputypes.h>
   41 #include <machine/md_var.h>
   42 #include <machine/specialreg.h>
   43 
   44 #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
   45 #define CPU_ENABLE_SSE
   46 #endif
   47 
   48 void initializecpu(void);
   49 #if defined(I586_CPU) && defined(CPU_WT_ALLOC)
   50 void    enable_K5_wt_alloc(void);
   51 void    enable_K6_wt_alloc(void);
   52 void    enable_K6_2_wt_alloc(void);
   53 #endif
   54 
   55 #ifdef I486_CPU
   56 static void init_5x86(void);
   57 static void init_bluelightning(void);
   58 static void init_486dlc(void);
   59 static void init_cy486dx(void);
   60 #ifdef CPU_I486_ON_386
   61 static void init_i486_on_386(void);
   62 #endif
   63 static void init_6x86(void);
   64 #endif /* I486_CPU */
   65 
   66 #ifdef I686_CPU
   67 static void     init_6x86MX(void);
   68 static void     init_ppro(void);
   69 static void     init_mendocino(void);
   70 #endif
   71 
   72 static int      hw_instruction_sse;
   73 SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
   74     &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
   75 
   76 /* Must *NOT* be BSS or locore will bzero these after setting them */
   77 int     cpu = 0;                /* Are we 386, 386sx, 486, etc? */
   78 u_int   cpu_feature = 0;        /* Feature flags */
   79 u_int   cpu_feature2 = 0;       /* Feature flags */
   80 u_int   amd_feature = 0;        /* AMD feature flags */
   81 u_int   amd_feature2 = 0;       /* AMD feature flags */
   82 u_int   via_feature_rng = 0;    /* VIA RNG features */
   83 u_int   via_feature_xcrypt = 0; /* VIA ACE features */
   84 u_int   cpu_high = 0;           /* Highest arg to CPUID */
   85 u_int   cpu_id = 0;             /* Stepping ID */
   86 u_int   cpu_procinfo = 0;       /* HyperThreading Info / Brand Index / CLFLUSH */
   87 u_int   cpu_procinfo2 = 0;      /* Multicore info */
   88 char    cpu_vendor[20] = "";    /* CPU Origin code */
   89 
   90 SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
   91         &via_feature_rng, 0, "VIA C3/C7 RNG feature available in CPU");
   92 SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD,
   93         &via_feature_xcrypt, 0, "VIA C3/C7 xcrypt feature available in CPU");
   94 
   95 #ifdef CPU_ENABLE_SSE
   96 u_int   cpu_fxsr;               /* SSE enabled */
   97 u_int   cpu_mxcsr_mask;         /* valid bits in mxcsr */
   98 #endif
   99 
  100 #ifdef I486_CPU
  101 /*
  102  * IBM Blue Lightning
  103  */
  104 static void
  105 init_bluelightning(void)
  106 {
  107         u_long  eflags;
  108 
  109 #if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
  110         need_post_dma_flush = 1;
  111 #endif
  112 
  113         eflags = read_eflags();
  114         disable_intr();
  115 
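              /*
               * Setting CD (cache disable) and NW (not write-through) in CR0
               * turns the internal cache off, and invd discards its contents,
               * so the MSR writes below take effect with caching quiesced;
               * caching is re-enabled at the end of the routine.
               */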
  116         load_cr0(rcr0() | CR0_CD | CR0_NW);
  117         invd();
  118 
  119 #ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
  120         wrmsr(0x1000, 0x9c92LL);        /* FP operand can be cacheable on Cyrix FPU */
  121 #else
  122         wrmsr(0x1000, 0x1c92LL);        /* Intel FPU */
  123 #endif
  124         /* Enables 13MB and 0-640KB cache. */
  125         wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
  126 #ifdef CPU_BLUELIGHTNING_3X
  127         wrmsr(0x1002, 0x04000000LL);    /* Enables triple-clock mode. */
  128 #else
  129         wrmsr(0x1002, 0x03000000LL);    /* Enables double-clock mode. */
  130 #endif
  131 
  132         /* Enable caching in CR0. */
  133         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  134         invd();
  135         write_eflags(eflags);
  136 }
  137 
  138 /*
  139  * Cyrix 486SLC/DLC/SR/DR series
  140  */
  141 static void
  142 init_486dlc(void)
  143 {
  144         u_long  eflags;
  145         u_char  ccr0;
  146 
  147         eflags = read_eflags();
  148         disable_intr();
  149         invd();
  150 
  151         ccr0 = read_cyrix_reg(CCR0);
  152 #ifndef CYRIX_CACHE_WORKS
  153         ccr0 |= CCR0_NC1 | CCR0_BARB;
  154         write_cyrix_reg(CCR0, ccr0);
  155         invd();
  156 #else
  157         ccr0 &= ~CCR0_NC0;
  158 #ifndef CYRIX_CACHE_REALLY_WORKS
  159         ccr0 |= CCR0_NC1 | CCR0_BARB;
  160 #else
  161         ccr0 |= CCR0_NC1;
  162 #endif
  163 #ifdef CPU_DIRECT_MAPPED_CACHE
  164         ccr0 |= CCR0_CO;                        /* Direct mapped mode. */
  165 #endif
  166         write_cyrix_reg(CCR0, ccr0);
  167 
  168         /* Clear non-cacheable region. */
  169         write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
  170         write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
  171         write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
  172         write_cyrix_reg(NCR4+2, NCR_SIZE_0K);
  173 
  174         write_cyrix_reg(0, 0);  /* dummy write */
  175 
  176         /* Enable caching in CR0. */
  177         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  178         invd();
  179 #endif /* !CYRIX_CACHE_WORKS */
  180         write_eflags(eflags);
  181 }
  182 
  183 
  184 /*
  185  * Cyrix 486S/DX series
  186  */
  187 static void
  188 init_cy486dx(void)
  189 {
  190         u_long  eflags;
  191         u_char  ccr2;
  192 
  193         eflags = read_eflags();
  194         disable_intr();
  195         invd();
  196 
  197         ccr2 = read_cyrix_reg(CCR2);
  198 #ifdef CPU_SUSP_HLT
  199         ccr2 |= CCR2_SUSP_HLT;
  200 #endif
  201 
  202 #ifdef PC98
  203         /* Enables WB cache interface pin and Lock NW bit in CR0. */
  204         ccr2 |= CCR2_WB | CCR2_LOCK_NW;
  205         /* Unlock NW bit in CR0. */
  206         write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
  207         load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0, NW = 1 */
  208 #endif
  209 
  210         write_cyrix_reg(CCR2, ccr2);
  211         write_eflags(eflags);
  212 }
  213 
  214 
  215 /*
  216  * Cyrix 5x86
  217  */
  218 static void
  219 init_5x86(void)
  220 {
  221         u_long  eflags;
  222         u_char  ccr2, ccr3, ccr4, pcr0;
  223 
  224         eflags = read_eflags();
  225         disable_intr();
  226 
  227         load_cr0(rcr0() | CR0_CD | CR0_NW);
  228         wbinvd();
  229 
  230         (void)read_cyrix_reg(CCR3);             /* dummy */
  231 
  232         /* Initialize CCR2. */
  233         ccr2 = read_cyrix_reg(CCR2);
  234         ccr2 |= CCR2_WB;
  235 #ifdef CPU_SUSP_HLT
  236         ccr2 |= CCR2_SUSP_HLT;
  237 #else
  238         ccr2 &= ~CCR2_SUSP_HLT;
  239 #endif
  240         ccr2 |= CCR2_WT1;
  241         write_cyrix_reg(CCR2, ccr2);
  242 
  243         /* Initialize CCR4. */
  244         ccr3 = read_cyrix_reg(CCR3);
  245         write_cyrix_reg(CCR3, CCR3_MAPEN0);
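              /*
               * Writing CCR3_MAPEN0 to CCR3 exposes the extended Cyrix
               * configuration registers (CCR4, PCR0, ...); the saved CCR3
               * value is restored below to hide them again.
               */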
  246 
  247         ccr4 = read_cyrix_reg(CCR4);
  248         ccr4 |= CCR4_DTE;
  249         ccr4 |= CCR4_MEM;
  250 #ifdef CPU_FASTER_5X86_FPU
  251         ccr4 |= CCR4_FASTFPE;
  252 #else
  253         ccr4 &= ~CCR4_FASTFPE;
  254 #endif
  255         ccr4 &= ~CCR4_IOMASK;
  256         /********************************************************************
  257          * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time
   258          * should be 0 as an errata fix.
  259          ********************************************************************/
  260 #ifdef CPU_IORT
  261         ccr4 |= CPU_IORT & CCR4_IOMASK;
  262 #endif
  263         write_cyrix_reg(CCR4, ccr4);
  264 
  265         /* Initialize PCR0. */
  266         /****************************************************************
  267          * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
  268          * BTB_EN might make your system unstable.
  269          ****************************************************************/
  270         pcr0 = read_cyrix_reg(PCR0);
  271 #ifdef CPU_RSTK_EN
  272         pcr0 |= PCR0_RSTK;
  273 #else
  274         pcr0 &= ~PCR0_RSTK;
  275 #endif
  276 #ifdef CPU_BTB_EN
  277         pcr0 |= PCR0_BTB;
  278 #else
  279         pcr0 &= ~PCR0_BTB;
  280 #endif
  281 #ifdef CPU_LOOP_EN
  282         pcr0 |= PCR0_LOOP;
  283 #else
  284         pcr0 &= ~PCR0_LOOP;
  285 #endif
  286 
  287         /****************************************************************
  288          * WARNING: if you use a memory-mapped I/O device, don't use
  289          * the CPU_DISABLE_5X86_LSSER option, which may reorder
  290          * memory-mapped I/O accesses.
  291          * IF YOUR MOTHERBOARD HAS A PCI BUS, DON'T DISABLE LSSER.
  292          ****************************************************************/
  293 #ifdef CPU_DISABLE_5X86_LSSER
  294         pcr0 &= ~PCR0_LSSER;
  295 #else
  296         pcr0 |= PCR0_LSSER;
  297 #endif
  298         write_cyrix_reg(PCR0, pcr0);
  299 
  300         /* Restore CCR3. */
  301         write_cyrix_reg(CCR3, ccr3);
  302 
  303         (void)read_cyrix_reg(0x80);             /* dummy */
  304 
  305         /* Unlock NW bit in CR0. */
  306         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
  307         load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0, NW = 1 */
  308         /* Lock NW bit in CR0. */
  309         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);
  310 
  311         write_eflags(eflags);
  312 }
  313 
  314 #ifdef CPU_I486_ON_386
  315 /*
  316  * There are i486-based upgrade products for i386 machines.
  317  * In this case, the BIOS doesn't enable the CPU cache.
  318  */
  319 static void
  320 init_i486_on_386(void)
  321 {
  322         u_long  eflags;
  323 
  324 #if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
  325         need_post_dma_flush = 1;
  326 #endif
  327 
  328         eflags = read_eflags();
  329         disable_intr();
  330 
  331         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0, NW = 0 */
  332 
  333         write_eflags(eflags);
  334 }
  335 #endif
  336 
  337 /*
  338  * Cyrix 6x86
  339  *
  340  * XXX - What should I do here?  Please let me know.
  341  */
  342 static void
  343 init_6x86(void)
  344 {
  345         u_long  eflags;
  346         u_char  ccr3, ccr4;
  347 
  348         eflags = read_eflags();
  349         disable_intr();
  350 
  351         load_cr0(rcr0() | CR0_CD | CR0_NW);
  352         wbinvd();
  353 
  354         /* Initialize CCR0. */
  355         write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);
  356 
  357         /* Initialize CCR1. */
  358 #ifdef CPU_CYRIX_NO_LOCK
  359         write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
  360 #else
  361         write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
  362 #endif
  363 
  364         /* Initialize CCR2. */
  365 #ifdef CPU_SUSP_HLT
  366         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
  367 #else
  368         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
  369 #endif
  370 
  371         ccr3 = read_cyrix_reg(CCR3);
  372         write_cyrix_reg(CCR3, CCR3_MAPEN0);
  373 
  374         /* Initialize CCR4. */
  375         ccr4 = read_cyrix_reg(CCR4);
  376         ccr4 |= CCR4_DTE;
  377         ccr4 &= ~CCR4_IOMASK;
  378 #ifdef CPU_IORT
  379         write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
  380 #else
  381         write_cyrix_reg(CCR4, ccr4 | 7);
  382 #endif
  383 
  384         /* Initialize CCR5. */
  385 #ifdef CPU_WT_ALLOC
  386         write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
  387 #endif
  388 
  389         /* Restore CCR3. */
  390         write_cyrix_reg(CCR3, ccr3);
  391 
  392         /* Unlock NW bit in CR0. */
  393         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
  394 
  395         /*
  396          * Earlier revisions of the 6x86 CPU could crash the system if
  397          * the L1 cache is in write-back mode.
  398          */
  399         if ((cyrix_did & 0xff00) > 0x1600)
  400                 load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  401         else {
  402                 /* Revision 2.6 and lower. */
  403 #ifdef CYRIX_CACHE_REALLY_WORKS
  404                 load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  405 #else
  406                 load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0 and NW = 1 */
  407 #endif
  408         }
  409 
  410         /* Lock NW bit in CR0. */
  411         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);
  412 
  413         write_eflags(eflags);
  414 }
  415 #endif /* I486_CPU */
  416 
  417 #ifdef I686_CPU
  418 /*
  419  * Cyrix 6x86MX (code-named M2)
  420  *
  421  * XXX - What should I do here?  Please let me know.
  422  */
  423 static void
  424 init_6x86MX(void)
  425 {
  426         u_long  eflags;
  427         u_char  ccr3, ccr4;
  428 
  429         eflags = read_eflags();
  430         disable_intr();
  431 
  432         load_cr0(rcr0() | CR0_CD | CR0_NW);
  433         wbinvd();
  434 
  435         /* Initialize CCR0. */
  436         write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);
  437 
  438         /* Initialize CCR1. */
  439 #ifdef CPU_CYRIX_NO_LOCK
  440         write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
  441 #else
  442         write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
  443 #endif
  444 
  445         /* Initialize CCR2. */
  446 #ifdef CPU_SUSP_HLT
  447         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
  448 #else
  449         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
  450 #endif
  451 
  452         ccr3 = read_cyrix_reg(CCR3);
  453         write_cyrix_reg(CCR3, CCR3_MAPEN0);
  454 
  455         /* Initialize CCR4. */
  456         ccr4 = read_cyrix_reg(CCR4);
  457         ccr4 &= ~CCR4_IOMASK;
  458 #ifdef CPU_IORT
  459         write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
  460 #else
  461         write_cyrix_reg(CCR4, ccr4 | 7);
  462 #endif
  463 
  464         /* Initialize CCR5. */
  465 #ifdef CPU_WT_ALLOC
  466         write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
  467 #endif
  468 
  469         /* Restore CCR3. */
  470         write_cyrix_reg(CCR3, ccr3);
  471 
  472         /* Unlock NW bit in CR0. */
  473         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
  474 
  475         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  476 
  477         /* Lock NW bit in CR0. */
  478         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);
  479 
  480         write_eflags(eflags);
  481 }
  482 
  483 static void
  484 init_ppro(void)
  485 {
  486         u_int64_t       apicbase;
  487 
  488         /*
  489          * Local APIC should be disabled if it is not going to be used.
  490          */
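              /*
               * MSR_APICBASE is the IA32_APIC_BASE MSR; clearing its enable
               * bit switches the local APIC off.  On the P6 family it then
               * stays disabled until the next reset.
               */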
  491         apicbase = rdmsr(MSR_APICBASE);
  492         apicbase &= ~APICBASE_ENABLED;
  493         wrmsr(MSR_APICBASE, apicbase);
  494 }
  495 
  496 /*
  497  * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
  498  * L2 cache).
  499  */
  500 static void
  501 init_mendocino(void)
  502 {
  503 #ifdef CPU_PPRO2CELERON
  504         u_long  eflags;
  505         u_int64_t       bbl_cr_ctl3;
  506 
  507         eflags = read_eflags();
  508         disable_intr();
  509 
  510         load_cr0(rcr0() | CR0_CD | CR0_NW);
  511         wbinvd();
  512 
  513         bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);
  514 
  515         /* If the L2 cache is configured, do nothing. */
  516         if (!(bbl_cr_ctl3 & 1)) {
  517                 bbl_cr_ctl3 = 0x134052bLL;
  518 
  519                 /* Set L2 Cache Latency (Default: 5). */
  520 #ifdef  CPU_CELERON_L2_LATENCY
  521 #if CPU_L2_LATENCY > 15
  522 #error invalid CPU_L2_LATENCY.
  523 #endif
  524                 bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
  525 #else
  526                 bbl_cr_ctl3 |= 5 << 1;
  527 #endif
  528                 wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
  529         }
  530 
  531         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
  532         write_eflags(eflags);
  533 #endif /* CPU_PPRO2CELERON */
  534 }
  535 
  536 /*
  537  * Initialize special VIA C3/C7 features
  538  */
  539 static void
  540 init_via(void)
  541 {
  542         u_int regs[4], val;
  543         u_int64_t msreg;
  544 
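              /*
               * CPUID leaf 0xc0000000 is the Centaur/VIA extended range: EAX
               * returns the highest supported extended leaf, and leaf
               * 0xc0000001 reports the PadLock feature and enable bits in EDX.
               */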
  545         do_cpuid(0xc0000000, regs);
  546         val = regs[0];
  547         if (val >= 0xc0000001) {
  548                 do_cpuid(0xc0000001, regs);
  549                 val = regs[3];
  550         } else
  551                 val = 0;
  552 
  553         /* Enable RNG if present and disabled */
  554         if (val & VIA_CPUID_HAS_RNG) {
  555                 if (!(val & VIA_CPUID_DO_RNG)) {
  556                         msreg = rdmsr(0x110B);
  557                         msreg |= 0x40;
  558                         wrmsr(0x110B, msreg);
  559                 }
  560                 via_feature_rng = VIA_HAS_RNG;
  561         }
  562         /* Enable AES engine if present and disabled */
  563         if (val & VIA_CPUID_HAS_ACE) {
  564                 if (!(val & VIA_CPUID_DO_ACE)) {
  565                         msreg = rdmsr(0x1107);
  566                         msreg |= (0x01 << 28);
  567                         wrmsr(0x1107, msreg);
  568                 }
  569                 via_feature_xcrypt |= VIA_HAS_AES;
  570         }
  571         /* Enable ACE2 engine if present and disabled */
  572         if (val & VIA_CPUID_HAS_ACE2) {
  573                 if (!(val & VIA_CPUID_DO_ACE2)) {
  574                         msreg = rdmsr(0x1107);
  575                         msreg |= (0x01 << 28);
  576                         wrmsr(0x1107, msreg);
  577                 }
  578                 via_feature_xcrypt |= VIA_HAS_AESCTR;
  579         }
  580         /* Enable SHA engine if present and disabled */
  581         if (val & VIA_CPUID_HAS_PHE) {
  582                 if (!(val & VIA_CPUID_DO_PHE)) {
  583                         msreg = rdmsr(0x1107);
  584                         msreg |= (0x01 << 28/**/);
  585                         wrmsr(0x1107, msreg);
  586                 }
  587                 via_feature_xcrypt |= VIA_HAS_SHA;
  588         }
  589         /* Enable MM engine if present and disabled */
  590         if (val & VIA_CPUID_HAS_PMM) {
  591                 if (!(val & VIA_CPUID_DO_PMM)) {
  592                         msreg = rdmsr(0x1107);
  593                         msreg |= (0x01 << 28/**/);
  594                         wrmsr(0x1107, msreg);
  595                 }
  596                 via_feature_xcrypt |= VIA_HAS_MM;
  597         }
  598 }
  599 
  600 #endif /* I686_CPU */
  601 
  602 /*
  603  * Initialize CR4 (Control register 4) to enable SSE instructions.
  604  */
  605 void
  606 enable_sse(void)
  607 {
  608 #if defined(CPU_ENABLE_SSE)
  609         if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
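                      /*
                       * CR4_FXSR (OSFXSR) enables fxsave/fxrstor and the SSE
                       * instructions; CR4_XMM (OSXMMEXCPT) lets unmasked SIMD
                       * floating-point exceptions be delivered as #XM.
                       */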
  610                 load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
  611                 cpu_fxsr = hw_instruction_sse = 1;
  612         }
  613 #endif
  614 }
  615 
  616 void
  617 initializecpu(void)
  618 {
  619 
  620         switch (cpu) {
  621 #ifdef I486_CPU
  622         case CPU_BLUE:
  623                 init_bluelightning();
  624                 break;
  625         case CPU_486DLC:
  626                 init_486dlc();
  627                 break;
  628         case CPU_CY486DX:
  629                 init_cy486dx();
  630                 break;
  631         case CPU_M1SC:
  632                 init_5x86();
  633                 break;
  634 #ifdef CPU_I486_ON_386
  635         case CPU_486:
  636                 init_i486_on_386();
  637                 break;
  638 #endif
  639         case CPU_M1:
  640                 init_6x86();
  641                 break;
  642 #endif /* I486_CPU */
  643 #ifdef I686_CPU
  644         case CPU_M2:
  645                 init_6x86MX();
  646                 break;
  647         case CPU_686:
  648                 if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
  649                         switch (cpu_id & 0xff0) {
  650                         case 0x610:
  651                                 init_ppro();
  652                                 break;
  653                         case 0x660:
  654                                 init_mendocino();
  655                                 break;
  656                         }
  657                 } else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
  658 #if defined(I686_CPU) && defined(CPU_ATHLON_SSE_HACK)
  659                         /*
  660                          * Sometimes the BIOS doesn't enable SSE instructions.
  661                          * According to AMD document 20734, the mobile
  662                          * Duron, the (mobile) Athlon 4 and the Athlon MP
  663                          * support SSE. These correspond to cpu_id 0x66X
  664                          * or 0x67X.
  665                          */
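                              /*
                               * MSR 0xC0010015 is the AMD HWCR; bit 15 hides
                               * the SSE capability on these parts, so it is
                               * cleared and CPUID leaf 1 is re-read to pick up
                               * the SSE feature flag.
                               */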
  666                         if ((cpu_feature & CPUID_XMM) == 0 &&
  667                             ((cpu_id & ~0xf) == 0x660 ||
  668                              (cpu_id & ~0xf) == 0x670 ||
  669                              (cpu_id & ~0xf) == 0x680)) {
  670                                 u_int regs[4];
  671                                 wrmsr(0xC0010015, rdmsr(0xC0010015) & ~0x08000);
  672                                 do_cpuid(1, regs);
  673                                 cpu_feature = regs[3];
  674                         }
  675 #endif
  676                 } else if (strcmp(cpu_vendor, "CentaurHauls") == 0) {
  677                         switch (cpu_id & 0xff0) {
  678                         case 0x690:
  679                                 if ((cpu_id & 0xf) < 3)
  680                                         break;
  681                                 /* fall through. */
  682                         case 0x6a0:
  683                                 init_via();
  684                                 break;
  685                         default:
  686                                 break;
  687                         }
  688                 }
  689                 break;
  690 #endif
  691         default:
  692                 break;
  693         }
  694         enable_sse();
  695 
  696 #if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
  697         /*
  698          * The OS should flush the L1 cache by itself because no PC-98 machine
  699          * supports non-Intel CPUs.  Use the wbinvd instruction before a DMA
  700          * transfer when need_pre_dma_flush = 1, and the invd instruction after
  701          * a DMA transfer when need_post_dma_flush = 1.  If your CPU upgrade
  702          * product supports hardware cache control, you can add the
  703          * CPU_UPGRADE_HW_CACHE option to your kernel configuration file.
  704          * This option eliminates the unneeded cache flush instruction(s).
  705          */
  706         if (strcmp(cpu_vendor, "CyrixInstead") == 0) {
  707                 switch (cpu) {
  708 #ifdef I486_CPU
  709                 case CPU_486DLC:
  710                         need_post_dma_flush = 1;
  711                         break;
  712                 case CPU_M1SC:
  713                         need_pre_dma_flush = 1;
  714                         break;
  715                 case CPU_CY486DX:
  716                         need_pre_dma_flush = 1;
  717 #ifdef CPU_I486_ON_386
  718                         need_post_dma_flush = 1;
  719 #endif
  720                         break;
  721 #endif
  722                 default:
  723                         break;
  724                 }
  725         } else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
  726                 switch (cpu_id & 0xFF0) {
  727                 case 0x470:             /* Enhanced Am486DX2 WB */
  728                 case 0x490:             /* Enhanced Am486DX4 WB */
  729                 case 0x4F0:             /* Am5x86 WB */
  730                         need_pre_dma_flush = 1;
  731                         break;
  732                 }
  733         } else if (strcmp(cpu_vendor, "IBM") == 0) {
  734                 need_post_dma_flush = 1;
  735         } else {
  736 #ifdef CPU_I486_ON_386
  737                 need_pre_dma_flush = 1;
  738 #endif
  739         }
  740 #endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
  741 }
  742 
  743 #if defined(I586_CPU) && defined(CPU_WT_ALLOC)
  744 /*
  745  * Enable the write-allocate feature of AMD processors.
  746  * The following two functions require the Maxmem variable to be set.
  747  */
  748 void
  749 enable_K5_wt_alloc(void)
  750 {
  751         u_int64_t       msr;
  752         register_t      savecrit;
  753 
  754         /*
  755          * Write allocate is supported only on models 1, 2, and 3, with
  756          * a stepping of 4 or greater.
  757          */
  758         if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
  759                 savecrit = intr_disable();
  760                 msr = rdmsr(0x83);              /* HWCR */
  761                 wrmsr(0x83, msr & ~(0x10));     /* disable write allocate */
  762 
  763                 /*
  764                  * We have to tell the chip where the top of memory is,
  765                  * since video cards could have frame buffers there,
  766                  * memory-mapped I/O could be there, etc.
  767                  */
  768                 if(Maxmem > 0)
  769                   msr = Maxmem / 16;
  770                 else
  771                   msr = 0;
  772                 msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
  773 #ifdef PC98
  774                 if (!(inb(0x43b) & 4)) {
  775                         wrmsr(0x86, 0x0ff00f0);
  776                         msr |= AMD_WT_ALLOC_PRE;
  777                 }
  778 #else
  779                 /*
  780                  * There is no way to know whether the 15-16M hole exists or not.
  781                  * Therefore, we disable write allocate for this range.
  782                  */
  783                         wrmsr(0x86, 0x0ff00f0);
  784                         msr |= AMD_WT_ALLOC_PRE;
  785 #endif
  786                 wrmsr(0x85, msr);
  787 
  788                 msr=rdmsr(0x83);
  789                 wrmsr(0x83, msr|0x10); /* enable write allocate */
  790                 intr_restore(savecrit);
  791         }
  792 }
  793 
  794 void
  795 enable_K6_wt_alloc(void)
  796 {
  797         quad_t  size;
  798         u_int64_t       whcr;
  799         u_long  eflags;
  800 
  801         eflags = read_eflags();
  802         disable_intr();
  803         wbinvd();
  804 
  805 #ifdef CPU_DISABLE_CACHE
  806         /*
  807          * Certain K6-2 boxes become unstable when write allocation is
  808          * enabled.
  809          */
  810         /*
  811          * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
  812          * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
  813          * All other bits in TR12 have no effect on the processor's operation.
  814          * The I/O Trap Restart function (bit 9 of TR12) is always enabled
  815          * on the AMD-K6.
  816          */
  817         wrmsr(0x0000000e, (u_int64_t)0x0008);
  818 #endif
  819         /* Don't assume that the memory size is aligned to 4M. */
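              /*
               * Maxmem is in 4 KB pages: ">> 8" converts it to megabytes and
               * "(+ 3) >> 2" rounds up to the 4 MB units used by the WHCR
               * limit field.
               */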
  820         if (Maxmem > 0)
  821           size = ((Maxmem >> 8) + 3) >> 2;
  822         else
  823           size = 0;
  824 
  825         /* Limit is 508M bytes. */
  826         if (size > 0x7f)
  827                 size = 0x7f;
  828         whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);
  829 
  830 #if defined(PC98) || defined(NO_MEMORY_HOLE)
  831         if (whcr & (0x7fLL << 1)) {
  832 #ifdef PC98
  833                 /*
  834                  * If bit 2 of port 0x43b is 0, disable write allocate for the
  835                  * 15-16M range.
  836                  */
  837                 if (!(inb(0x43b) & 4))
  838                         whcr &= ~0x0001LL;
  839                 else
  840 #endif
  841                         whcr |=  0x0001LL;
  842         }
  843 #else
  844         /*
  845          * There is no way to know whether the 15-16M hole exists or not.
  846          * Therefore, we disable write allocate for this range.
  847          */
  848         whcr &= ~0x0001LL;
  849 #endif
  850         wrmsr(0x0c0000082, whcr);
  851 
  852         write_eflags(eflags);
  853 }
  854 
  855 void
  856 enable_K6_2_wt_alloc(void)
  857 {
  858         quad_t  size;
  859         u_int64_t       whcr;
  860         u_long  eflags;
  861 
  862         eflags = read_eflags();
  863         disable_intr();
  864         wbinvd();
  865 
  866 #ifdef CPU_DISABLE_CACHE
  867         /*
  868          * Certain K6-2 boxes become unstable when write allocation is
  869          * enabled.
  870          */
  871         /*
  872          * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
  873          * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
  874          * All other bits in TR12 have no effect on the processor's operation.
  875          * The I/O Trap Restart function (bit 9 of TR12) is always enabled
  876          * on the AMD-K6.
  877          */
  878         wrmsr(0x0000000e, (u_int64_t)0x0008);
  879 #endif
  880         /* Don't assume that the memory size is aligned to 4M. */
  881         if (Maxmem > 0)
  882           size = ((Maxmem >> 8) + 3) >> 2;
  883         else
  884           size = 0;
  885 
  886         /* Limit is 4092M bytes. */
  887         if (size > 0x3ff)
  888                 size = 0x3ff;
  889         whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);
  890 
  891 #if defined(PC98) || defined(NO_MEMORY_HOLE)
  892         if (whcr & (0x3ffLL << 22)) {
  893 #ifdef PC98
  894                 /*
  895                  * If bit 2 of port 0x43b is 0, disable write allocate for the
  896                  * 15-16M range.
  897                  */
  898                 if (!(inb(0x43b) & 4))
  899                         whcr &= ~(1LL << 16);
  900                 else
  901 #endif
  902                         whcr |=  1LL << 16;
  903         }
  904 #else
  905         /*
  906          * There is no way to know whether the 15-16M hole exists or not.
  907          * Therefore, we disable write allocate for this range.
  908          */
  909         whcr &= ~(1LL << 16);
  910 #endif
  911         wrmsr(0x0c0000082, whcr);
  912 
  913         write_eflags(eflags);
  914 }
  915 #endif /* I586_CPU && CPU_WT_ALLOC */
  916 
  917 #include "opt_ddb.h"
  918 #ifdef DDB
  919 #include <ddb/ddb.h>
  920 
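      /*
       * DDB "show cyrixreg" command: prints CR0 and, on Cyrix CPUs, the
       * Cyrix configuration registers.
       */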
  921 DB_SHOW_COMMAND(cyrixreg, cyrixreg)
  922 {
  923         u_long  eflags;
  924         u_int   cr0;
  925         u_char  ccr1, ccr2, ccr3;
  926         u_char  ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;
  927 
  928         cr0 = rcr0();
  929         if (strcmp(cpu_vendor,"CyrixInstead") == 0) {
  930                 eflags = read_eflags();
  931                 disable_intr();
  932 
  933 
  934                 if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
  935                         ccr0 = read_cyrix_reg(CCR0);
  936                 }
  937                 ccr1 = read_cyrix_reg(CCR1);
  938                 ccr2 = read_cyrix_reg(CCR2);
  939                 ccr3 = read_cyrix_reg(CCR3);
  940                 if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
  941                         write_cyrix_reg(CCR3, CCR3_MAPEN0);
  942                         ccr4 = read_cyrix_reg(CCR4);
  943                         if ((cpu == CPU_M1) || (cpu == CPU_M2))
  944                                 ccr5 = read_cyrix_reg(CCR5);
  945                         else
  946                                 pcr0 = read_cyrix_reg(PCR0);
  947                         write_cyrix_reg(CCR3, ccr3);            /* Restore CCR3. */
  948                 }
  949                 write_eflags(eflags);
  950 
  951                 if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
  952                         printf("CCR0=%x, ", (u_int)ccr0);
  953 
  954                 printf("CCR1=%x, CCR2=%x, CCR3=%x",
  955                         (u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
  956                 if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
  957                         printf(", CCR4=%x, ", (u_int)ccr4);
  958                         if (cpu == CPU_M1SC)
  959                                 printf("PCR0=%x\n", pcr0);
  960                         else
  961                                 printf("CCR5=%x\n", ccr5);
  962                 }
  963         }
  964         printf("CR0=%x\n", cr0);
  965 }
  966 #endif /* DDB */
