FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/initcpu.c


    1 /*-
    2  * Copyright (c) KATO Takenori, 1997, 1998.
    3  * 
    4  * All rights reserved.  Unpublished rights reserved under the copyright
    5  * laws of Japan.
    6  * 
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer as
   13  *    the first lines of this file unmodified.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 
   18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/9.0/sys/i386/i386/initcpu.c 220018 2011-03-26 02:02:07Z jkim $");
   32 
   33 #include "opt_cpu.h"
   34 
   35 #include <sys/param.h>
   36 #include <sys/kernel.h>
   37 #include <sys/systm.h>
   38 #include <sys/sysctl.h>
   39 
   40 #include <machine/cputypes.h>
   41 #include <machine/md_var.h>
   42 #include <machine/specialreg.h>
   43 
   44 #include <vm/vm.h>
   45 #include <vm/pmap.h>
   46 
   47 #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
   48 #define CPU_ENABLE_SSE
   49 #endif
   50 
   51 void initializecpu(void);
   52 #if defined(I586_CPU) && defined(CPU_WT_ALLOC)
   53 void    enable_K5_wt_alloc(void);
   54 void    enable_K6_wt_alloc(void);
   55 void    enable_K6_2_wt_alloc(void);
   56 #endif
   57 
   58 #ifdef I486_CPU
   59 static void init_5x86(void);
   60 static void init_bluelightning(void);
   61 static void init_486dlc(void);
   62 static void init_cy486dx(void);
   63 #ifdef CPU_I486_ON_386
   64 static void init_i486_on_386(void);
   65 #endif
   66 static void init_6x86(void);
   67 #endif /* I486_CPU */
   68 
   69 #ifdef I686_CPU
   70 static void     init_6x86MX(void);
   71 static void     init_ppro(void);
   72 static void     init_mendocino(void);
   73 #endif
   74 
   75 static int      hw_instruction_sse;
   76 SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
   77     &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
   78 /*
   79  * -1: automatic (default)
    80  *  0: keep CLFLUSH enabled
    81  *  1: force CLFLUSH disabled
   82  */
   83 static int      hw_clflush_disable = -1;
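       /*
        * The value can be overridden at boot time, e.g. by setting
        * hw.clflush_disable="1" in /boot/loader.conf; initializecpu() below
        * fetches it with TUNABLE_INT_FETCH().
        */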
   84 
   85 /* Must *NOT* be BSS or locore will bzero these after setting them */
   86 int     cpu = 0;                /* Are we 386, 386sx, 486, etc? */
   87 u_int   cpu_feature = 0;        /* Feature flags */
   88 u_int   cpu_feature2 = 0;       /* Feature flags */
   89 u_int   amd_feature = 0;        /* AMD feature flags */
   90 u_int   amd_feature2 = 0;       /* AMD feature flags */
   91 u_int   amd_pminfo = 0;         /* AMD advanced power management info */
   92 u_int   via_feature_rng = 0;    /* VIA RNG features */
   93 u_int   via_feature_xcrypt = 0; /* VIA ACE features */
   94 u_int   cpu_high = 0;           /* Highest arg to CPUID */
   95 u_int   cpu_id = 0;             /* Stepping ID */
    96 u_int   cpu_procinfo = 0;       /* HyperThreading Info / Brand Index / CLFLUSH */
   97 u_int   cpu_procinfo2 = 0;      /* Multicore info */
   98 char    cpu_vendor[20] = "";    /* CPU Origin code */
   99 u_int   cpu_vendor_id = 0;      /* CPU vendor ID */
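       /*
        * Default CLFLUSH line size in bytes; initializecpu() replaces it with
        * the value reported by CPUID when the CPU advertises CLFLUSH.
        */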
  100 u_int   cpu_clflush_line_size = 32;
  101 
  102 SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
  103         &via_feature_rng, 0, "VIA RNG feature available in CPU");
  104 SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD,
  105         &via_feature_xcrypt, 0, "VIA xcrypt feature available in CPU");
  106 
  107 #ifdef CPU_ENABLE_SSE
  108 u_int   cpu_fxsr;               /* SSE enabled */
  109 u_int   cpu_mxcsr_mask;         /* valid bits in mxcsr */
  110 #endif
  111 
  112 #ifdef I486_CPU
  113 /*
  114  * IBM Blue Lightning
  115  */
  116 static void
  117 init_bluelightning(void)
  118 {
  119         register_t saveintr;
  120 
  121 #if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
  122         need_post_dma_flush = 1;
  123 #endif
  124 
  125         saveintr = intr_disable();
  126 
  127         load_cr0(rcr0() | CR0_CD | CR0_NW);
  128         invd();
  129 
  130 #ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
  131         wrmsr(0x1000, 0x9c92LL);        /* FP operand can be cacheable on Cyrix FPU */
  132 #else
  133         wrmsr(0x1000, 0x1c92LL);        /* Intel FPU */
  134 #endif
  135         /* Enables 13MB and 0-640KB cache. */
  136         wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
  137 #ifdef CPU_BLUELIGHTNING_3X
  138         wrmsr(0x1002, 0x04000000LL);    /* Enables triple-clock mode. */
  139 #else
  140         wrmsr(0x1002, 0x03000000LL);    /* Enables double-clock mode. */
  141 #endif
  142 
  143         /* Enable caching in CR0. */
  144         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  145         invd();
  146         intr_restore(saveintr);
  147 }
  148 
  149 /*
  150  * Cyrix 486SLC/DLC/SR/DR series
  151  */
  152 static void
  153 init_486dlc(void)
  154 {
  155         register_t saveintr;
  156         u_char  ccr0;
  157 
  158         saveintr = intr_disable();
  159         invd();
  160 
  161         ccr0 = read_cyrix_reg(CCR0);
  162 #ifndef CYRIX_CACHE_WORKS
  163         ccr0 |= CCR0_NC1 | CCR0_BARB;
  164         write_cyrix_reg(CCR0, ccr0);
  165         invd();
  166 #else
  167         ccr0 &= ~CCR0_NC0;
  168 #ifndef CYRIX_CACHE_REALLY_WORKS
  169         ccr0 |= CCR0_NC1 | CCR0_BARB;
  170 #else
  171         ccr0 |= CCR0_NC1;
  172 #endif
  173 #ifdef CPU_DIRECT_MAPPED_CACHE
  174         ccr0 |= CCR0_CO;                        /* Direct mapped mode. */
  175 #endif
  176         write_cyrix_reg(CCR0, ccr0);
  177 
  178         /* Clear non-cacheable region. */
  179         write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
  180         write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
  181         write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
  182         write_cyrix_reg(NCR4+2, NCR_SIZE_0K);
  183 
  184         write_cyrix_reg(0, 0);  /* dummy write */
  185 
  186         /* Enable caching in CR0. */
  187         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  188         invd();
  189 #endif /* !CYRIX_CACHE_WORKS */
  190         intr_restore(saveintr);
  191 }
  192 
  193 
  194 /*
  195  * Cyrix 486S/DX series
  196  */
  197 static void
  198 init_cy486dx(void)
  199 {
  200         register_t saveintr;
  201         u_char  ccr2;
  202 
  203         saveintr = intr_disable();
  204         invd();
  205 
  206         ccr2 = read_cyrix_reg(CCR2);
  207 #ifdef CPU_SUSP_HLT
  208         ccr2 |= CCR2_SUSP_HLT;
  209 #endif
  210 
  211 #ifdef PC98
  212         /* Enables WB cache interface pin and Lock NW bit in CR0. */
  213         ccr2 |= CCR2_WB | CCR2_LOCK_NW;
  214         /* Unlock NW bit in CR0. */
  215         write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
  216         load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0, NW = 1 */
  217 #endif
  218 
  219         write_cyrix_reg(CCR2, ccr2);
  220         intr_restore(saveintr);
  221 }
  222 
  223 
  224 /*
  225  * Cyrix 5x86
  226  */
  227 static void
  228 init_5x86(void)
  229 {
  230         register_t saveintr;
  231         u_char  ccr2, ccr3, ccr4, pcr0;
  232 
  233         saveintr = intr_disable();
  234 
  235         load_cr0(rcr0() | CR0_CD | CR0_NW);
  236         wbinvd();
  237 
  238         (void)read_cyrix_reg(CCR3);             /* dummy */
  239 
  240         /* Initialize CCR2. */
  241         ccr2 = read_cyrix_reg(CCR2);
  242         ccr2 |= CCR2_WB;
  243 #ifdef CPU_SUSP_HLT
  244         ccr2 |= CCR2_SUSP_HLT;
  245 #else
  246         ccr2 &= ~CCR2_SUSP_HLT;
  247 #endif
  248         ccr2 |= CCR2_WT1;
  249         write_cyrix_reg(CCR2, ccr2);
  250 
  251         /* Initialize CCR4. */
  252         ccr3 = read_cyrix_reg(CCR3);
  253         write_cyrix_reg(CCR3, CCR3_MAPEN0);
  254 
  255         ccr4 = read_cyrix_reg(CCR4);
  256         ccr4 |= CCR4_DTE;
  257         ccr4 |= CCR4_MEM;
  258 #ifdef CPU_FASTER_5X86_FPU
  259         ccr4 |= CCR4_FASTFPE;
  260 #else
  261         ccr4 &= ~CCR4_FASTFPE;
  262 #endif
  263         ccr4 &= ~CCR4_IOMASK;
  264         /********************************************************************
  265          * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time
   266          * should be 0 as an errata fix.
  267          ********************************************************************/
  268 #ifdef CPU_IORT
  269         ccr4 |= CPU_IORT & CCR4_IOMASK;
  270 #endif
  271         write_cyrix_reg(CCR4, ccr4);
  272 
  273         /* Initialize PCR0. */
  274         /****************************************************************
  275          * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
  276          * BTB_EN might make your system unstable.
  277          ****************************************************************/
  278         pcr0 = read_cyrix_reg(PCR0);
  279 #ifdef CPU_RSTK_EN
  280         pcr0 |= PCR0_RSTK;
  281 #else
  282         pcr0 &= ~PCR0_RSTK;
  283 #endif
  284 #ifdef CPU_BTB_EN
  285         pcr0 |= PCR0_BTB;
  286 #else
  287         pcr0 &= ~PCR0_BTB;
  288 #endif
  289 #ifdef CPU_LOOP_EN
  290         pcr0 |= PCR0_LOOP;
  291 #else
  292         pcr0 &= ~PCR0_LOOP;
  293 #endif
  294 
  295         /****************************************************************
   296          * WARNING: if you use a memory-mapped I/O device, don't use
   297          * the CPU_DISABLE_5X86_LSSER option, which may reorder memory-mapped
   298          * I/O accesses.
   299          * IF YOUR MOTHERBOARD HAS A PCI BUS, DON'T DISABLE LSSER.
  300          ****************************************************************/
  301 #ifdef CPU_DISABLE_5X86_LSSER
  302         pcr0 &= ~PCR0_LSSER;
  303 #else
  304         pcr0 |= PCR0_LSSER;
  305 #endif
  306         write_cyrix_reg(PCR0, pcr0);
  307 
  308         /* Restore CCR3. */
  309         write_cyrix_reg(CCR3, ccr3);
  310 
  311         (void)read_cyrix_reg(0x80);             /* dummy */
  312 
  313         /* Unlock NW bit in CR0. */
  314         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
  315         load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0, NW = 1 */
  316         /* Lock NW bit in CR0. */
  317         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);
  318 
  319         intr_restore(saveintr);
  320 }
  321 
  322 #ifdef CPU_I486_ON_386
  323 /*
   324  * There are i486-based upgrade products for i386 machines.
   325  * In this case, the BIOS doesn't enable the CPU cache.
  326  */
  327 static void
  328 init_i486_on_386(void)
  329 {
  330         register_t saveintr;
  331 
  332 #if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
  333         need_post_dma_flush = 1;
  334 #endif
  335 
  336         saveintr = intr_disable();
  337 
  338         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0, NW = 0 */
  339 
  340         intr_restore(saveintr);
  341 }
  342 #endif
  343 
  344 /*
  345  * Cyrix 6x86
  346  *
  347  * XXX - What should I do here?  Please let me know.
  348  */
  349 static void
  350 init_6x86(void)
  351 {
  352         register_t saveintr;
  353         u_char  ccr3, ccr4;
  354 
  355         saveintr = intr_disable();
  356 
  357         load_cr0(rcr0() | CR0_CD | CR0_NW);
  358         wbinvd();
  359 
  360         /* Initialize CCR0. */
  361         write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);
  362 
  363         /* Initialize CCR1. */
  364 #ifdef CPU_CYRIX_NO_LOCK
  365         write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
  366 #else
  367         write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
  368 #endif
  369 
  370         /* Initialize CCR2. */
  371 #ifdef CPU_SUSP_HLT
  372         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
  373 #else
  374         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
  375 #endif
  376 
  377         ccr3 = read_cyrix_reg(CCR3);
  378         write_cyrix_reg(CCR3, CCR3_MAPEN0);
  379 
  380         /* Initialize CCR4. */
  381         ccr4 = read_cyrix_reg(CCR4);
  382         ccr4 |= CCR4_DTE;
  383         ccr4 &= ~CCR4_IOMASK;
  384 #ifdef CPU_IORT
  385         write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
  386 #else
  387         write_cyrix_reg(CCR4, ccr4 | 7);
  388 #endif
  389 
  390         /* Initialize CCR5. */
  391 #ifdef CPU_WT_ALLOC
  392         write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
  393 #endif
  394 
  395         /* Restore CCR3. */
  396         write_cyrix_reg(CCR3, ccr3);
  397 
  398         /* Unlock NW bit in CR0. */
  399         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
  400 
  401         /*
   402          * Earlier revisions of the 6x86 CPU could crash the system if
   403          * the L1 cache is in write-back mode.
  404          */
  405         if ((cyrix_did & 0xff00) > 0x1600)
  406                 load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  407         else {
  408                 /* Revision 2.6 and lower. */
  409 #ifdef CYRIX_CACHE_REALLY_WORKS
  410                 load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  411 #else
  412                 load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0 and NW = 1 */
  413 #endif
  414         }
  415 
  416         /* Lock NW bit in CR0. */
  417         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);
  418 
  419         intr_restore(saveintr);
  420 }
  421 #endif /* I486_CPU */
  422 
  423 #ifdef I586_CPU
  424 /*
  425  * IDT WinChip C6/2/2A/2B/3
  426  *
  427  * http://www.centtech.com/winchip_bios_writers_guide_v4_0.pdf
  428  */
  429 static void
  430 init_winchip(void)
  431 {
  432         u_int regs[4];
  433         uint64_t fcr;
  434 
  435         fcr = rdmsr(0x0107);
  436 
  437         /*
  438          * Set ECX8, DSMC, DTLOCK/EDCTLB, EMMX, and ERETSTK and clear DPDC.
  439          */
  440         fcr |= (1 << 1) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 16);
  441         fcr &= ~(1ULL << 11);
  442 
  443         /*
   444          * Additionally, set EBRPRED, E2MMX and EAMD3D for WinChip 2 and 3.
  445          */
  446         if (CPUID_TO_MODEL(cpu_id) >= 8)
  447                 fcr |= (1 << 12) | (1 << 19) | (1 << 20);
  448 
  449         wrmsr(0x0107, fcr);
  450         do_cpuid(1, regs);
  451         cpu_feature = regs[3];
  452 }
  453 #endif
  454 
  455 #ifdef I686_CPU
  456 /*
  457  * Cyrix 6x86MX (code-named M2)
  458  *
  459  * XXX - What should I do here?  Please let me know.
  460  */
  461 static void
  462 init_6x86MX(void)
  463 {
  464         register_t saveintr;
  465         u_char  ccr3, ccr4;
  466 
  467         saveintr = intr_disable();
  468 
  469         load_cr0(rcr0() | CR0_CD | CR0_NW);
  470         wbinvd();
  471 
  472         /* Initialize CCR0. */
  473         write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);
  474 
  475         /* Initialize CCR1. */
  476 #ifdef CPU_CYRIX_NO_LOCK
  477         write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
  478 #else
  479         write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
  480 #endif
  481 
  482         /* Initialize CCR2. */
  483 #ifdef CPU_SUSP_HLT
  484         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
  485 #else
  486         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
  487 #endif
  488 
  489         ccr3 = read_cyrix_reg(CCR3);
  490         write_cyrix_reg(CCR3, CCR3_MAPEN0);
  491 
  492         /* Initialize CCR4. */
  493         ccr4 = read_cyrix_reg(CCR4);
  494         ccr4 &= ~CCR4_IOMASK;
  495 #ifdef CPU_IORT
  496         write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
  497 #else
  498         write_cyrix_reg(CCR4, ccr4 | 7);
  499 #endif
  500 
  501         /* Initialize CCR5. */
  502 #ifdef CPU_WT_ALLOC
  503         write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
  504 #endif
  505 
  506         /* Restore CCR3. */
  507         write_cyrix_reg(CCR3, ccr3);
  508 
  509         /* Unlock NW bit in CR0. */
  510         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
  511 
  512         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  513 
  514         /* Lock NW bit in CR0. */
  515         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);
  516 
  517         intr_restore(saveintr);
  518 }
  519 
  520 static void
  521 init_ppro(void)
  522 {
  523         u_int64_t       apicbase;
  524 
  525         /*
  526          * Local APIC should be disabled if it is not going to be used.
  527          */
  528         apicbase = rdmsr(MSR_APICBASE);
  529         apicbase &= ~APICBASE_ENABLED;
  530         wrmsr(MSR_APICBASE, apicbase);
  531 }
  532 
  533 /*
  534  * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
  535  * L2 cache).
  536  */
  537 static void
  538 init_mendocino(void)
  539 {
  540 #ifdef CPU_PPRO2CELERON
  541         register_t      saveintr;
  542         u_int64_t       bbl_cr_ctl3;
  543 
  544         saveintr = intr_disable();
  545 
  546         load_cr0(rcr0() | CR0_CD | CR0_NW);
  547         wbinvd();
  548 
  549         bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);
  550 
  551         /* If the L2 cache is configured, do nothing. */
  552         if (!(bbl_cr_ctl3 & 1)) {
  553                 bbl_cr_ctl3 = 0x134052bLL;
  554 
  555                 /* Set L2 Cache Latency (Default: 5). */
  556 #ifdef  CPU_CELERON_L2_LATENCY
  557 #if CPU_L2_LATENCY > 15
  558 #error invalid CPU_L2_LATENCY.
  559 #endif
  560                 bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
  561 #else
  562                 bbl_cr_ctl3 |= 5 << 1;
  563 #endif
  564                 wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
  565         }
  566 
  567         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
  568         intr_restore(saveintr);
  569 #endif /* CPU_PPRO2CELERON */
  570 }
  571 
  572 /*
  573  * Initialize special VIA features
  574  */
  575 static void
  576 init_via(void)
  577 {
  578         u_int regs[4], val;
  579         uint64_t fcr;
  580 
  581         /*
  582          * Explicitly enable CX8 and PGE on C3.
  583          *
  584          * http://www.via.com.tw/download/mainboards/6/13/VIA_C3_EBGA%20datasheet110.pdf
  585          */
  586         if (CPUID_TO_MODEL(cpu_id) <= 9)
  587                 fcr = (1 << 1) | (1 << 7);
  588         else
  589                 fcr = 0;
  590 
  591         /*
  592          * Check extended CPUID for PadLock features.
  593          *
  594          * http://www.via.com.tw/en/downloads/whitepapers/initiatives/padlock/programming_guide.pdf
  595          */
  596         do_cpuid(0xc0000000, regs);
  597         if (regs[0] >= 0xc0000001) {
  598                 do_cpuid(0xc0000001, regs);
  599                 val = regs[3];
  600         } else
  601                 val = 0;
  602 
  603         /* Enable RNG if present. */
  604         if ((val & VIA_CPUID_HAS_RNG) != 0) {
  605                 via_feature_rng = VIA_HAS_RNG;
  606                 wrmsr(0x110B, rdmsr(0x110B) | VIA_CPUID_DO_RNG);
  607         }
  608 
  609         /* Enable PadLock if present. */
  610         if ((val & VIA_CPUID_HAS_ACE) != 0)
  611                 via_feature_xcrypt |= VIA_HAS_AES;
  612         if ((val & VIA_CPUID_HAS_ACE2) != 0)
  613                 via_feature_xcrypt |= VIA_HAS_AESCTR;
  614         if ((val & VIA_CPUID_HAS_PHE) != 0)
  615                 via_feature_xcrypt |= VIA_HAS_SHA;
  616         if ((val & VIA_CPUID_HAS_PMM) != 0)
  617                 via_feature_xcrypt |= VIA_HAS_MM;
  618         if (via_feature_xcrypt != 0)
  619                 fcr |= 1 << 28;
  620 
  621         wrmsr(0x1107, rdmsr(0x1107) | fcr);
  622 }
  623 
  624 #endif /* I686_CPU */
  625 
  626 #if defined(I586_CPU) || defined(I686_CPU)
  627 static void
  628 init_transmeta(void)
  629 {
   630         u_int regs[4];
  631 
  632         /* Expose all hidden features. */
  633         wrmsr(0x80860004, rdmsr(0x80860004) | ~0UL);
  634         do_cpuid(1, regs);
  635         cpu_feature = regs[3];
  636 }
  637 #endif
  638 
  639 /*
  640  * Initialize CR4 (Control register 4) to enable SSE instructions.
  641  */
  642 void
  643 enable_sse(void)
  644 {
  645 #if defined(CPU_ENABLE_SSE)
  646         if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
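                       /*
                        * CR4_FXSR (OSFXSR) lets the OS use fxsave/fxrstor and SSE;
                        * CR4_XMM (OSXMMEXCPT) unmasks SIMD floating-point exceptions.
                        */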
  647                 load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
  648                 cpu_fxsr = hw_instruction_sse = 1;
  649         }
  650 #endif
  651 }
  652 
  653 void
  654 initializecpu(void)
  655 {
  656 
  657         switch (cpu) {
  658 #ifdef I486_CPU
  659         case CPU_BLUE:
  660                 init_bluelightning();
  661                 break;
  662         case CPU_486DLC:
  663                 init_486dlc();
  664                 break;
  665         case CPU_CY486DX:
  666                 init_cy486dx();
  667                 break;
  668         case CPU_M1SC:
  669                 init_5x86();
  670                 break;
  671 #ifdef CPU_I486_ON_386
  672         case CPU_486:
  673                 init_i486_on_386();
  674                 break;
  675 #endif
  676         case CPU_M1:
  677                 init_6x86();
  678                 break;
  679 #endif /* I486_CPU */
  680 #ifdef I586_CPU
  681         case CPU_586:
  682                 switch (cpu_vendor_id) {
  683                 case CPU_VENDOR_CENTAUR:
  684                         init_winchip();
  685                         break;
  686                 case CPU_VENDOR_TRANSMETA:
  687                         init_transmeta();
  688                         break;
  689                 }
  690                 break;
  691 #endif
  692 #ifdef I686_CPU
  693         case CPU_M2:
  694                 init_6x86MX();
  695                 break;
  696         case CPU_686:
  697                 switch (cpu_vendor_id) {
  698                 case CPU_VENDOR_INTEL:
  699                         switch (cpu_id & 0xff0) {
  700                         case 0x610:
  701                                 init_ppro();
  702                                 break;
  703                         case 0x660:
  704                                 init_mendocino();
  705                                 break;
  706                         }
  707                         break;
  708 #ifdef CPU_ATHLON_SSE_HACK
  709                 case CPU_VENDOR_AMD:
  710                         /*
  711                          * Sometimes the BIOS doesn't enable SSE instructions.
  712                          * According to AMD document 20734, the mobile
  713                          * Duron, the (mobile) Athlon 4 and the Athlon MP
   714          * support SSE. These correspond to cpu_id 0x66X,
   715          * 0x67X, or 0x68X.
  716                          */
  717                         if ((cpu_feature & CPUID_XMM) == 0 &&
  718                             ((cpu_id & ~0xf) == 0x660 ||
  719                              (cpu_id & ~0xf) == 0x670 ||
  720                              (cpu_id & ~0xf) == 0x680)) {
  721                                 u_int regs[4];
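                                       /*
                                        * HWCR bit 15 (0x8000) hides the SSE
                                        * feature bit here; clear it and re-read
                                        * CPUID so cpu_feature picks up CPUID_XMM.
                                        */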
  722                                 wrmsr(MSR_HWCR, rdmsr(MSR_HWCR) & ~0x08000);
  723                                 do_cpuid(1, regs);
  724                                 cpu_feature = regs[3];
  725                         }
  726                         break;
  727 #endif
  728                 case CPU_VENDOR_CENTAUR:
  729                         init_via();
  730                         break;
  731                 case CPU_VENDOR_TRANSMETA:
  732                         init_transmeta();
  733                         break;
  734                 }
  735 #ifdef PAE
  736                 if ((amd_feature & AMDID_NX) != 0) {
  737                         uint64_t msr;
  738 
  739                         msr = rdmsr(MSR_EFER) | EFER_NXE;
  740                         wrmsr(MSR_EFER, msr);
  741                         pg_nx = PG_NX;
  742                 }
  743 #endif
  744                 break;
  745 #endif
  746         default:
  747                 break;
  748         }
  749         enable_sse();
  750 
  751         /*
   752          * CPUID with %eax = 1 returns, in %ebx:
  753          * Bits 15-8: CLFLUSH line size
  754          *      (Value * 8 = cache line size in bytes)
  755          */
  756         if ((cpu_feature & CPUID_CLFSH) != 0)
  757                 cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
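               /*
                * For example, a value of 8 in bits 15-8 means an 8 * 8 = 64-byte
                * cache line, the common case on modern x86 CPUs.
                */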
  758         /*
  759          * XXXKIB: (temporary) hack to work around traps generated
  760          * when CLFLUSHing APIC register window under virtualization
  761          * environments.  These environments tend to disable the
  762          * CPUID_SS feature even though the native CPU supports it.
  763          */
  764         TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
  765         if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1)
  766                 cpu_feature &= ~CPUID_CLFSH;
  767         /*
   768          * Allow the CLFLUSH feature to be disabled manually via the
   769          * hw.clflush_disable tunable.
  770          */
  771         if (hw_clflush_disable == 1)
  772                 cpu_feature &= ~CPUID_CLFSH;
  773 
  774 #if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
  775         /*
   776          * The OS should flush the L1 cache itself because no PC-98 machine
   777          * supports non-Intel CPUs.  Use the wbinvd instruction before a DMA
   778          * transfer when need_pre_dma_flush = 1, and the invd instruction
   779          * after a DMA transfer when need_post_dma_flush = 1.  If your CPU
   780          * upgrade product supports hardware cache control, you can add the
   781          * CPU_UPGRADE_HW_CACHE option to your kernel configuration file.
   782          * This option eliminates unneeded cache flush instructions.
  783          */
  784         if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
  785                 switch (cpu) {
  786 #ifdef I486_CPU
  787                 case CPU_486DLC:
  788                         need_post_dma_flush = 1;
  789                         break;
  790                 case CPU_M1SC:
  791                         need_pre_dma_flush = 1;
  792                         break;
  793                 case CPU_CY486DX:
  794                         need_pre_dma_flush = 1;
  795 #ifdef CPU_I486_ON_386
  796                         need_post_dma_flush = 1;
  797 #endif
  798                         break;
  799 #endif
  800                 default:
  801                         break;
  802                 }
  803         } else if (cpu_vendor_id == CPU_VENDOR_AMD) {
  804                 switch (cpu_id & 0xFF0) {
  805                 case 0x470:             /* Enhanced Am486DX2 WB */
  806                 case 0x490:             /* Enhanced Am486DX4 WB */
  807                 case 0x4F0:             /* Am5x86 WB */
  808                         need_pre_dma_flush = 1;
  809                         break;
  810                 }
  811         } else if (cpu_vendor_id == CPU_VENDOR_IBM) {
  812                 need_post_dma_flush = 1;
  813         } else {
  814 #ifdef CPU_I486_ON_386
  815                 need_pre_dma_flush = 1;
  816 #endif
  817         }
  818 #endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
  819 }
  820 
  821 #if defined(I586_CPU) && defined(CPU_WT_ALLOC)
  822 /*
   823  * Enable the write-allocate feature of AMD processors.
   824  * The following functions require the Maxmem variable to be set.
  825  */
  826 void
  827 enable_K5_wt_alloc(void)
  828 {
  829         u_int64_t       msr;
  830         register_t      saveintr;
  831 
  832         /*
  833          * Write allocate is supported only on models 1, 2, and 3, with
  834          * a stepping of 4 or greater.
  835          */
  836         if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
  837                 saveintr = intr_disable();
  838                 msr = rdmsr(0x83);              /* HWCR */
   839                 wrmsr(0x83, msr & ~0x10);
  840 
  841                 /*
  842                  * We have to tell the chip where the top of memory is,
   843          * since video cards could have frame buffers there,
  844                  * memory-mapped I/O could be there, etc.
  845                  */
  846                 if(Maxmem > 0)
  847                   msr = Maxmem / 16;
  848                 else
  849                   msr = 0;
  850                 msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
  851 #ifdef PC98
  852                 if (!(inb(0x43b) & 4)) {
  853                         wrmsr(0x86, 0x0ff00f0);
  854                         msr |= AMD_WT_ALLOC_PRE;
  855                 }
  856 #else
  857                 /*
   858          * There is no way to know whether the 15-16M hole exists or not.
  859                  * Therefore, we disable write allocate for this range.
  860                  */
  861                         wrmsr(0x86, 0x0ff00f0);
  862                         msr |= AMD_WT_ALLOC_PRE;
  863 #endif
  864                 wrmsr(0x85, msr);
  865 
  866                 msr=rdmsr(0x83);
  867                 wrmsr(0x83, msr|0x10); /* enable write allocate */
  868                 intr_restore(saveintr);
  869         }
  870 }
  871 
  872 void
  873 enable_K6_wt_alloc(void)
  874 {
  875         quad_t  size;
  876         u_int64_t       whcr;
  877         register_t      saveintr;
  878 
  879         saveintr = intr_disable();
  880         wbinvd();
  881 
  882 #ifdef CPU_DISABLE_CACHE
  883         /*
   884          * Certain K6-2 boxes become unstable when write allocation is
  885          * enabled.
  886          */
  887         /*
   888          * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
   889          * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
   890          * All other bits in TR12 have no effect on the processor's operation.
  891          * The I/O Trap Restart function (bit 9 of TR12) is always enabled
  892          * on the AMD-K6.
  893          */
  894         wrmsr(0x0000000e, (u_int64_t)0x0008);
  895 #endif
  896         /* Don't assume that memory size is aligned with 4M. */
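               /*
                * Maxmem is the top of physical memory in 4 KB pages, so
                * Maxmem >> 8 is the size in MB and the "+ 3 ... >> 2" below
                * rounds up to the 4 MB units used by WHCR.
                */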
  897         if (Maxmem > 0)
  898           size = ((Maxmem >> 8) + 3) >> 2;
  899         else
  900           size = 0;
  901 
  902         /* Limit is 508M bytes. */
  903         if (size > 0x7f)
  904                 size = 0x7f;
  905         whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);
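               /*
                * Bits 7-1 of WHCR hold the write-allocate limit in 4 MB units
                * (508 MB max) on this part; enable_K6_2_wt_alloc() below uses
                * bits 31-22 instead.
                */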
  906 
  907 #if defined(PC98) || defined(NO_MEMORY_HOLE)
  908         if (whcr & (0x7fLL << 1)) {
  909 #ifdef PC98
  910                 /*
   911          * If bit 2 of port 0x43b is 0, disable write allocate for the
  912                  * 15-16M range.
  913                  */
  914                 if (!(inb(0x43b) & 4))
  915                         whcr &= ~0x0001LL;
  916                 else
  917 #endif
  918                         whcr |=  0x0001LL;
  919         }
  920 #else
  921         /*
   922          * There is no way to know whether the 15-16M hole exists or not.
  923          * Therefore, we disable write allocate for this range.
  924          */
  925         whcr &= ~0x0001LL;
  926 #endif
  927         wrmsr(0x0c0000082, whcr);
  928 
  929         intr_restore(saveintr);
  930 }
  931 
  932 void
  933 enable_K6_2_wt_alloc(void)
  934 {
  935         quad_t  size;
  936         u_int64_t       whcr;
  937         register_t      saveintr;
  938 
  939         saveintr = intr_disable();
  940         wbinvd();
  941 
  942 #ifdef CPU_DISABLE_CACHE
  943         /*
   944          * Certain K6-2 boxes become unstable when write allocation is
  945          * enabled.
  946          */
  947         /*
   948          * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
   949          * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
   950          * All other bits in TR12 have no effect on the processor's operation.
  951          * The I/O Trap Restart function (bit 9 of TR12) is always enabled
  952          * on the AMD-K6.
  953          */
  954         wrmsr(0x0000000e, (u_int64_t)0x0008);
  955 #endif
  956         /* Don't assume that memory size is aligned with 4M. */
  957         if (Maxmem > 0)
  958           size = ((Maxmem >> 8) + 3) >> 2;
  959         else
  960           size = 0;
  961 
  962         /* Limit is 4092M bytes. */
   963         if (size > 0x3ff)
  964                 size = 0x3ff;
  965         whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);
  966 
  967 #if defined(PC98) || defined(NO_MEMORY_HOLE)
  968         if (whcr & (0x3ffLL << 22)) {
  969 #ifdef PC98
  970                 /*
   971          * If bit 2 of port 0x43b is 0, disable write allocate for the
  972                  * 15-16M range.
  973                  */
  974                 if (!(inb(0x43b) & 4))
  975                         whcr &= ~(1LL << 16);
  976                 else
  977 #endif
  978                         whcr |=  1LL << 16;
  979         }
  980 #else
  981         /*
   982          * There is no way to know whether the 15-16M hole exists or not.
  983          * Therefore, we disable write allocate for this range.
  984          */
  985         whcr &= ~(1LL << 16);
  986 #endif
  987         wrmsr(0x0c0000082, whcr);
  988 
  989         intr_restore(saveintr);
  990 }
   991 #endif /* I586_CPU && CPU_WT_ALLOC */
  992 
  993 #include "opt_ddb.h"
  994 #ifdef DDB
  995 #include <ddb/ddb.h>
  996 
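       /*
        * DB_SHOW_COMMAND() registers this as "show cyrixreg" in the DDB kernel
        * debugger; it dumps the Cyrix configuration registers along with CR0.
        */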
  997 DB_SHOW_COMMAND(cyrixreg, cyrixreg)
  998 {
  999         register_t saveintr;
 1000         u_int   cr0;
 1001         u_char  ccr1, ccr2, ccr3;
 1002         u_char  ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;
 1003 
 1004         cr0 = rcr0();
 1005         if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
 1006                 saveintr = intr_disable();
 1007 
 1008 
 1009                 if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
 1010                         ccr0 = read_cyrix_reg(CCR0);
 1011                 }
 1012                 ccr1 = read_cyrix_reg(CCR1);
 1013                 ccr2 = read_cyrix_reg(CCR2);
 1014                 ccr3 = read_cyrix_reg(CCR3);
 1015                 if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
 1016                         write_cyrix_reg(CCR3, CCR3_MAPEN0);
 1017                         ccr4 = read_cyrix_reg(CCR4);
 1018                         if ((cpu == CPU_M1) || (cpu == CPU_M2))
 1019                                 ccr5 = read_cyrix_reg(CCR5);
 1020                         else
 1021                                 pcr0 = read_cyrix_reg(PCR0);
 1022                         write_cyrix_reg(CCR3, ccr3);            /* Restore CCR3. */
 1023                 }
 1024                 intr_restore(saveintr);
 1025 
 1026                 if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
 1027                         printf("CCR0=%x, ", (u_int)ccr0);
 1028 
 1029                 printf("CCR1=%x, CCR2=%x, CCR3=%x",
 1030                         (u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
 1031                 if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
 1032                         printf(", CCR4=%x, ", (u_int)ccr4);
 1033                         if (cpu == CPU_M1SC)
 1034                                 printf("PCR0=%x\n", pcr0);
 1035                         else
 1036                                 printf("CCR5=%x\n", ccr5);
 1037                 }
 1038         }
 1039         printf("CR0=%x\n", cr0);
 1040 }
 1041 #endif /* DDB */
