The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/initcpu.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) KATO Takenori, 1997, 1998.
    3  * 
    4  * All rights reserved.  Unpublished rights reserved under the copyright
    5  * laws of Japan.
    6  * 
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer as
   13  *    the first lines of this file unmodified.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 
   18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   28  */
   29 
   30 #include <sys/cdefs.h>
   31 __FBSDID("$FreeBSD: releng/10.4/sys/i386/i386/initcpu.c 313150 2017-02-03 12:20:44Z kib $");
   32 
   33 #include "opt_cpu.h"
   34 
   35 #include <sys/param.h>
   36 #include <sys/kernel.h>
   37 #include <sys/systm.h>
   38 #include <sys/sysctl.h>
   39 
   40 #include <machine/cputypes.h>
   41 #include <machine/md_var.h>
   42 #include <machine/specialreg.h>
   43 
   44 #include <vm/vm.h>
   45 #include <vm/pmap.h>
   46 
   47 #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
   48 #define CPU_ENABLE_SSE
   49 #endif
   50 
   51 #ifdef I486_CPU
   52 static void init_5x86(void);
   53 static void init_bluelightning(void);
   54 static void init_486dlc(void);
   55 static void init_cy486dx(void);
   56 #ifdef CPU_I486_ON_386
   57 static void init_i486_on_386(void);
   58 #endif
   59 static void init_6x86(void);
   60 #endif /* I486_CPU */
   61 
   62 #if defined(I586_CPU) && defined(CPU_WT_ALLOC)
   63 static void     enable_K5_wt_alloc(void);
   64 static void     enable_K6_wt_alloc(void);
   65 static void     enable_K6_2_wt_alloc(void);
   66 #endif
   67 
   68 #ifdef I686_CPU
   69 static void     init_6x86MX(void);
   70 static void     init_ppro(void);
   71 static void     init_mendocino(void);
   72 #endif
   73 
static int      hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
/*
 * hw.clflush_disable tunable (fetched in initializecpucache()):
 * -1: automatic (default)
 *  0: keep enable CLFLUSH
 *  1: force disable CLFLUSH
 */
static int      hw_clflush_disable = -1;

/* CPU identification state, filled in by the CPU probe code. */
int     cpu;                    /* Are we 386, 386sx, 486, etc? */
u_int   cpu_feature;            /* Feature flags */
u_int   cpu_feature2;           /* Feature flags */
u_int   amd_feature;            /* AMD feature flags */
u_int   amd_feature2;           /* AMD feature flags */
u_int   amd_pminfo;             /* AMD advanced power management info */
u_int   via_feature_rng;        /* VIA RNG features */
u_int   via_feature_xcrypt;     /* VIA ACE features */
u_int   cpu_high;               /* Highest arg to CPUID */
u_int   cpu_exthigh;            /* Highest arg to extended CPUID */
u_int   cpu_id;                 /* Stepping ID */
u_int   cpu_procinfo;           /* HyperThreading Info / Brand Index / CLFUSH */
u_int   cpu_procinfo2;          /* Multicore info */
char    cpu_vendor[20];         /* CPU Origin code */
u_int   cpu_vendor_id;          /* CPU vendor ID */
#ifdef CPU_ENABLE_SSE
u_int   cpu_fxsr;               /* SSE enabled */
u_int   cpu_mxcsr_mask;         /* Valid bits in mxcsr */
#endif
u_int   cpu_clflush_line_size = 32;     /* Default; refined from CPUID later. */
u_int   cpu_stdext_feature;
u_int   cpu_stdext_feature2;
u_int   cpu_max_ext_state_size;
u_int   cpu_mon_mwait_flags;    /* MONITOR/MWAIT flags (CPUID.05H.ECX) */
u_int   cpu_mon_min_size;       /* MONITOR minimum range size, bytes */
u_int   cpu_mon_max_size;       /* MONITOR maximum range size, bytes */
u_int   cyrix_did;              /* Device ID of Cyrix CPU */
u_int   cpu_maxphyaddr;         /* Max phys addr width in bits */

SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
        &via_feature_rng, 0, "VIA RNG feature available in CPU");
SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD,
        &via_feature_xcrypt, 0, "VIA xcrypt feature available in CPU");
  117 
  118 #ifdef I486_CPU
/*
 * IBM Blue Lightning
 *
 * Configure the on-chip cache and the clock multiplier through
 * model-specific registers 0x1000-0x1002.  Caching is disabled
 * (CR0_CD|CR0_NW) and the cache invalidated while the MSRs change,
 * then re-enabled afterwards.  Interrupts are held off for the
 * duration of the sequence.
 */
static void
init_bluelightning(void)
{
        register_t saveintr;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
        /* PC-98 DMA workaround: invalidate the cache after DMA transfers. */
        need_post_dma_flush = 1;
#endif

        saveintr = intr_disable();

        /* Disable caching while the cache configuration MSRs are rewritten. */
        load_cr0(rcr0() | CR0_CD | CR0_NW);
        invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
        wrmsr(0x1000, 0x9c92LL);        /* FP operand can be cacheable on Cyrix FPU */
#else
        wrmsr(0x1000, 0x1c92LL);        /* Intel FPU */
#endif
        /* Enables 13MB and 0-640KB cache. */
        wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
        wrmsr(0x1002, 0x04000000LL);    /* Enables triple-clock mode. */
#else
        wrmsr(0x1002, 0x03000000LL);    /* Enables double-clock mode. */
#endif

        /* Enable caching in CR0. */
        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
        invd();
        intr_restore(saveintr);
}
  154 
/*
 * Cyrix 486SLC/DLC/SR/DR series
 *
 * Program CCR0 (and clear the NCR1-NCR4 non-cacheable regions) to set
 * up the on-chip cache.  Without CYRIX_CACHE_WORKS the cache is left
 * restricted (NC1|BARB) because it is not reliable on these parts.
 */
static void
init_486dlc(void)
{
        register_t saveintr;
        u_char  ccr0;

        saveintr = intr_disable();
        invd();

        ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
        /* Cache not trusted: keep the restrictive NC1/BARB settings. */
        ccr0 |= CCR0_NC1 | CCR0_BARB;
        write_cyrix_reg(CCR0, ccr0);
        invd();
#else
        ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
        ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
        ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
        ccr0 |= CCR0_CO;                        /* Direct mapped mode. */
#endif
        write_cyrix_reg(CCR0, ccr0);

        /* Clear non-cacheable region. */
        write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
        write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
        write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
        write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

        write_cyrix_reg(0, 0);  /* dummy write */

        /* Enable caching in CR0. */
        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
        invd();
#endif /* !CYRIX_CACHE_WORKS */
        intr_restore(saveintr);
}
  198 
  199 
/*
 * Cyrix 486S/DX series
 *
 * Set CCR2 options: suspend-on-HLT when CPU_SUSP_HLT is configured,
 * and on PC-98 additionally the write-back cache pin and the NW-bit
 * lock (NW is set in CR0 while the lock bit is temporarily cleared).
 */
static void
init_cy486dx(void)
{
        register_t saveintr;
        u_char  ccr2;

        saveintr = intr_disable();
        invd();

        ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
        ccr2 |= CCR2_SUSP_HLT;
#endif

#ifdef PC98
        /* Enables WB cache interface pin and Lock NW bit in CR0. */
        ccr2 |= CCR2_WB | CCR2_LOCK_NW;
        /* Unlock NW bit in CR0. */
        write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
        load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0, NW = 1 */
#endif

        write_cyrix_reg(CCR2, ccr2);
        intr_restore(saveintr);
}
  228 
  229 
/*
 * Cyrix 5x86
 *
 * Program CCR2/CCR4 and PCR0 (the latter two reached through the CCR3
 * MAPEN0 window) according to the CPU_* kernel options, with caching
 * disabled while the configuration registers are rewritten.  Finally
 * NW is set and locked in CR0 for write-back operation.
 */
static void
init_5x86(void)
{
        register_t saveintr;
        u_char  ccr2, ccr3, ccr4, pcr0;

        saveintr = intr_disable();

        /* Disable caching while the configuration registers change. */
        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();

        (void)read_cyrix_reg(CCR3);             /* dummy */

        /* Initialize CCR2. */
        ccr2 = read_cyrix_reg(CCR2);
        ccr2 |= CCR2_WB;
#ifdef CPU_SUSP_HLT
        ccr2 |= CCR2_SUSP_HLT;
#else
        ccr2 &= ~CCR2_SUSP_HLT;
#endif
        ccr2 |= CCR2_WT1;
        write_cyrix_reg(CCR2, ccr2);

        /* Initialize CCR4. */
        ccr3 = read_cyrix_reg(CCR3);
        write_cyrix_reg(CCR3, CCR3_MAPEN0);     /* Unhide CCR4/PCR0. */

        ccr4 = read_cyrix_reg(CCR4);
        ccr4 |= CCR4_DTE;
        ccr4 |= CCR4_MEM;
#ifdef CPU_FASTER_5X86_FPU
        ccr4 |= CCR4_FASTFPE;
#else
        ccr4 &= ~CCR4_FASTFPE;
#endif
        ccr4 &= ~CCR4_IOMASK;
        /********************************************************************
         * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time
         * should be 0 for errata fix.
         ********************************************************************/
#ifdef CPU_IORT
        ccr4 |= CPU_IORT & CCR4_IOMASK;
#endif
        write_cyrix_reg(CCR4, ccr4);

        /* Initialize PCR0. */
        /****************************************************************
         * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
         * BTB_EN might make your system unstable.
         ****************************************************************/
        pcr0 = read_cyrix_reg(PCR0);
#ifdef CPU_RSTK_EN
        pcr0 |= PCR0_RSTK;
#else
        pcr0 &= ~PCR0_RSTK;
#endif
#ifdef CPU_BTB_EN
        pcr0 |= PCR0_BTB;
#else
        pcr0 &= ~PCR0_BTB;
#endif
#ifdef CPU_LOOP_EN
        pcr0 |= PCR0_LOOP;
#else
        pcr0 &= ~PCR0_LOOP;
#endif

        /****************************************************************
         * WARNING: if you use a memory mapped I/O device, don't use
         * DISABLE_5X86_LSSER option, which may reorder memory mapped
         * I/O access.
         * IF YOUR MOTHERBOARD HAS PCI BUS, DON'T DISABLE LSSER.
         ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
        pcr0 &= ~PCR0_LSSER;
#else
        pcr0 |= PCR0_LSSER;
#endif
        write_cyrix_reg(PCR0, pcr0);

        /* Restore CCR3. */
        write_cyrix_reg(CCR3, ccr3);

        (void)read_cyrix_reg(0x80);             /* dummy */

        /* Unlock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
        load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0, NW = 1 */
        /* Lock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

        intr_restore(saveintr);
}
  327 
  328 #ifdef CPU_I486_ON_386
/*
 * There are i486 based upgrade products for i386 machines.
 * In this case, BIOS doesn't enable CPU cache.
 *
 * Enable the cache by clearing CD and NW in CR0 with interrupts
 * disabled.
 */
static void
init_i486_on_386(void)
{
        register_t saveintr;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
        /* PC-98 DMA workaround: invalidate the cache after DMA transfers. */
        need_post_dma_flush = 1;
#endif

        saveintr = intr_disable();

        /* Turn on the cache the BIOS left disabled. */
        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0, NW = 0 */

        intr_restore(saveintr);
}
  348 #endif
  349 
/*
 * Cyrix 6x86
 *
 * Program CCR0-CCR5 (CCR4/CCR5 through the CCR3 MAPEN0 window) per
 * the CPU_* kernel options, then select the CR0 cache mode based on
 * the chip revision: early revisions could crash with the L1 cache
 * in write-back mode, so they get write-through (NW=1) unless
 * CYRIX_CACHE_REALLY_WORKS is configured.
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86(void)
{
        register_t saveintr;
        u_char  ccr3, ccr4;

        saveintr = intr_disable();

        /* Disable caching while the configuration registers change. */
        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();

        /* Initialize CCR0. */
        write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

        /* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
        write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
        write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

        /* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

        ccr3 = read_cyrix_reg(CCR3);
        write_cyrix_reg(CCR3, CCR3_MAPEN0);     /* Unhide CCR4/CCR5. */

        /* Initialize CCR4. */
        ccr4 = read_cyrix_reg(CCR4);
        ccr4 |= CCR4_DTE;
        ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
        write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
        write_cyrix_reg(CCR4, ccr4 | 7);        /* Maximum I/O recovery time. */
#endif

        /* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
        write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

        /* Restore CCR3. */
        write_cyrix_reg(CCR3, ccr3);

        /* Unlock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

        /*
         * Earlier revision of the 6x86 CPU could crash the system if
         * L1 cache is in write-back mode.
         */
        if ((cyrix_did & 0xff00) > 0x1600)
                load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
        else {
                /* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
                load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
#else
                load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0 and NW = 1 */
#endif
        }

        /* Lock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

        intr_restore(saveintr);
}
  427 #endif /* I486_CPU */
  428 
  429 #ifdef I586_CPU
  430 /*
  431  * Rise mP6
  432  */
  433 static void
  434 init_rise(void)
  435 {
  436 
  437         /*
  438          * The CMPXCHG8B instruction is always available but hidden.
  439          */
  440         cpu_feature |= CPUID_CX8;
  441 }
  442 
/*
 * IDT WinChip C6/2/2A/2B/3
 *
 * Program the Feature Control Register (MSR 0x0107) to turn on the
 * capabilities the chip implements but does not enable by default,
 * then re-read the CPUID feature flags.
 *
 * http://www.centtech.com/winchip_bios_writers_guide_v4_0.pdf
 */
static void
init_winchip(void)
{
        u_int regs[4];
        uint64_t fcr;

        fcr = rdmsr(0x0107);

        /*
         * Set ECX8, DSMC, DTLOCK/EDCTLB, EMMX, and ERETSTK and clear DPDC.
         */
        fcr |= (1 << 1) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 16);
        fcr &= ~(1ULL << 11);

        /*
         * Additionally, set EBRPRED, E2MMX and EAMD3D for WinChip 2 and 3.
         */
        if (CPUID_TO_MODEL(cpu_id) >= 8)
                fcr |= (1 << 12) | (1 << 19) | (1 << 20);

        wrmsr(0x0107, fcr);
        do_cpuid(1, regs);
        cpu_feature = regs[3];  /* Refresh flags from CPUID leaf 1, %edx. */
}
  472 #endif
  473 
  474 #ifdef I686_CPU
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * Same configuration-register programming as init_6x86(), but the
 * cache is always left in write-back mode (CD=0, NW=0) afterwards.
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86MX(void)
{
        register_t saveintr;
        u_char  ccr3, ccr4;

        saveintr = intr_disable();

        /* Disable caching while the configuration registers change. */
        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();

        /* Initialize CCR0. */
        write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

        /* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
        write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
        write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

        /* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

        ccr3 = read_cyrix_reg(CCR3);
        write_cyrix_reg(CCR3, CCR3_MAPEN0);     /* Unhide CCR4/CCR5. */

        /* Initialize CCR4. */
        ccr4 = read_cyrix_reg(CCR4);
        ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
        write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
        write_cyrix_reg(CCR4, ccr4 | 7);        /* Maximum I/O recovery time. */
#endif

        /* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
        write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

        /* Restore CCR3. */
        write_cyrix_reg(CCR3, ccr3);

        /* Unlock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */

        /* Lock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

        intr_restore(saveintr);
}
  538 
/* -1: unknown, 0: APIC disabled by init_ppro(), 1: APIC left enabled. */
static int ppro_apic_used = -1;

/*
 * Pentium Pro: turn off the local APIC unless it is already known to
 * be in use; ppro_reenable_apic() undoes this if the APIC turns out
 * to be needed after all.
 */
static void
init_ppro(void)
{
        u_int64_t       apicbase;

        /*
         * Local APIC should be disabled if it is not going to be used.
         */
        if (ppro_apic_used != 1) {
                apicbase = rdmsr(MSR_APICBASE);
                apicbase &= ~APICBASE_ENABLED;
                wrmsr(MSR_APICBASE, apicbase);
                ppro_apic_used = 0;
        }
}
  556 
/*
 * If the local APIC is going to be used after being disabled above,
 * re-enable it and don't disable it in the future.
 */
void
ppro_reenable_apic(void)
{
        u_int64_t       apicbase;

        if (ppro_apic_used == 0) {
                apicbase = rdmsr(MSR_APICBASE);
                apicbase |= APICBASE_ENABLED;
                wrmsr(MSR_APICBASE, apicbase);
                ppro_apic_used = 1;     /* init_ppro() will leave it alone. */
        }
}
  573 
/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).  Only does anything when the kernel is built with
 * CPU_PPRO2CELERON; caching is disabled while the MSR is rewritten.
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
        register_t      saveintr;
        u_int64_t       bbl_cr_ctl3;

        saveintr = intr_disable();

        /* Disable caching while the L2 configuration is changed. */
        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();

        bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);

        /* If the L2 cache is configured, do nothing. */
        if (!(bbl_cr_ctl3 & 1)) {
                bbl_cr_ctl3 = 0x134052bLL;

                /* Set L2 Cache Latency (Default: 5). */
#ifdef  CPU_CELERON_L2_LATENCY
#if CPU_L2_LATENCY > 15
#error invalid CPU_L2_LATENCY.
#endif
                /*
                 * NOTE(review): the guard above tests CPU_CELERON_L2_LATENCY
                 * but the value used is CPU_L2_LATENCY -- verify the intended
                 * kernel option name.
                 */
                bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
#else
                bbl_cr_ctl3 |= 5 << 1;
#endif
                wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
        }

        /* Re-enable caching. */
        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
        intr_restore(saveintr);
#endif /* CPU_PPRO2CELERON */
}
  612 
/*
 * Initialize special VIA features: enable CX8/PGE where needed and
 * detect/enable the PadLock RNG and crypto units, exporting what was
 * found through the via_feature_rng/via_feature_xcrypt globals.
 */
static void
init_via(void)
{
        u_int regs[4], val;
        uint64_t fcr;

        /*
         * Explicitly enable CX8 and PGE on C3.
         *
         * http://www.via.com.tw/download/mainboards/6/13/VIA_C3_EBGA%20datasheet110.pdf
         */
        if (CPUID_TO_MODEL(cpu_id) <= 9)
                fcr = (1 << 1) | (1 << 7);
        else
                fcr = 0;

        /*
         * Check extended CPUID for PadLock features.
         *
         * http://www.via.com.tw/en/downloads/whitepapers/initiatives/padlock/programming_guide.pdf
         */
        do_cpuid(0xc0000000, regs);
        if (regs[0] >= 0xc0000001) {
                do_cpuid(0xc0000001, regs);
                val = regs[3];
        } else
                val = 0;

        /* Enable RNG if present. */
        if ((val & VIA_CPUID_HAS_RNG) != 0) {
                via_feature_rng = VIA_HAS_RNG;
                wrmsr(0x110B, rdmsr(0x110B) | VIA_CPUID_DO_RNG);
        }

        /* Enable PadLock if present. */
        if ((val & VIA_CPUID_HAS_ACE) != 0)
                via_feature_xcrypt |= VIA_HAS_AES;
        if ((val & VIA_CPUID_HAS_ACE2) != 0)
                via_feature_xcrypt |= VIA_HAS_AESCTR;
        if ((val & VIA_CPUID_HAS_PHE) != 0)
                via_feature_xcrypt |= VIA_HAS_SHA;
        if ((val & VIA_CPUID_HAS_PMM) != 0)
                via_feature_xcrypt |= VIA_HAS_MM;
        if (via_feature_xcrypt != 0)
                fcr |= 1 << 28;         /* See PadLock programming guide. */

        wrmsr(0x1107, rdmsr(0x1107) | fcr);
}
  664 
  665 #endif /* I686_CPU */
  666 
  667 #if defined(I586_CPU) || defined(I686_CPU)
  668 static void
  669 init_transmeta(void)
  670 {
  671         u_int regs[0];
  672 
  673         /* Expose all hidden features. */
  674         wrmsr(0x80860004, rdmsr(0x80860004) | ~0UL);
  675         do_cpuid(1, regs);
  676         cpu_feature = regs[3];
  677 }
  678 #endif
  679 
  680 extern int elf32_nxstack;
  681 
/*
 * Dispatch to the model- and vendor-specific initialization routine
 * for the CPU identified by the "cpu", "cpu_id" and "cpu_vendor_id"
 * globals, then enable SSE (when CPU_ENABLE_SSE is configured and the
 * CPU supports FXSR+SSE) and, under PAE, the AMD no-execute feature.
 */
void
initializecpu(void)
{

        switch (cpu) {
#ifdef I486_CPU
        case CPU_BLUE:
                init_bluelightning();
                break;
        case CPU_486DLC:
                init_486dlc();
                break;
        case CPU_CY486DX:
                init_cy486dx();
                break;
        case CPU_M1SC:
                init_5x86();
                break;
#ifdef CPU_I486_ON_386
        case CPU_486:
                init_i486_on_386();
                break;
#endif
        case CPU_M1:
                init_6x86();
                break;
#endif /* I486_CPU */
#ifdef I586_CPU
        case CPU_586:
                switch (cpu_vendor_id) {
                case CPU_VENDOR_AMD:
#ifdef CPU_WT_ALLOC
                        /* Select the write-allocate setup by K5/K6 model. */
                        if (((cpu_id & 0x0f0) > 0) &&
                            ((cpu_id & 0x0f0) < 0x60) &&
                            ((cpu_id & 0x00f) > 3))
                                enable_K5_wt_alloc();
                        else if (((cpu_id & 0x0f0) > 0x80) ||
                            (((cpu_id & 0x0f0) == 0x80) &&
                                (cpu_id & 0x00f) > 0x07))
                                enable_K6_2_wt_alloc();
                        else if ((cpu_id & 0x0f0) > 0x50)
                                enable_K6_wt_alloc();
#endif
                        if ((cpu_id & 0xf0) == 0xa0)
                                /*
                                 * Make sure the TSC runs through
                                 * suspension, otherwise we can't use
                                 * it as timecounter
                                 */
                                wrmsr(0x1900, rdmsr(0x1900) | 0x20ULL);
                        break;
                case CPU_VENDOR_CENTAUR:
                        init_winchip();
                        break;
                case CPU_VENDOR_TRANSMETA:
                        init_transmeta();
                        break;
                case CPU_VENDOR_RISE:
                        init_rise();
                        break;
                }
                break;
#endif
#ifdef I686_CPU
        case CPU_M2:
                init_6x86MX();
                break;
        case CPU_686:
                switch (cpu_vendor_id) {
                case CPU_VENDOR_INTEL:
                        switch (cpu_id & 0xff0) {
                        case 0x610:
                                init_ppro();
                                break;
                        case 0x660:
                                init_mendocino();
                                break;
                        }
                        break;
#ifdef CPU_ATHLON_SSE_HACK
                case CPU_VENDOR_AMD:
                        /*
                         * Sometimes the BIOS doesn't enable SSE instructions.
                         * According to AMD document 20734, the mobile
                         * Duron, the (mobile) Athlon 4 and the Athlon MP
                         * support SSE. These correspond to cpu_id 0x66X
                         * or 0x67X.
                         */
                        if ((cpu_feature & CPUID_XMM) == 0 &&
                            ((cpu_id & ~0xf) == 0x660 ||
                             (cpu_id & ~0xf) == 0x670 ||
                             (cpu_id & ~0xf) == 0x680)) {
                                u_int regs[4];
                                wrmsr(MSR_HWCR, rdmsr(MSR_HWCR) & ~0x08000);
                                do_cpuid(1, regs);
                                cpu_feature = regs[3];
                        }
                        break;
#endif
                case CPU_VENDOR_CENTAUR:
                        init_via();
                        break;
                case CPU_VENDOR_TRANSMETA:
                        init_transmeta();
                        break;
                }
                break;
#endif
        default:
                break;
        }
#if defined(CPU_ENABLE_SSE)
        /* Enable FXSR/SSE and report availability via the sysctl flag. */
        if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
                load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
                cpu_fxsr = hw_instruction_sse = 1;
        }
#endif
#if defined(PAE) || defined(PAE_TABLES)
        /* With PAE page tables, enable the no-execute bit if present. */
        if ((amd_feature & AMDID_NX) != 0) {
                uint64_t msr;

                msr = rdmsr(MSR_EFER) | EFER_NXE;
                wrmsr(MSR_EFER, msr);
                pg_nx = PG_NX;
                elf32_nxstack = 1;
        }
#endif
}
  810 
/*
 * Determine the CLFLUSH line size from CPUID, apply the
 * virtualization workaround and the hw.clflush_disable tunable, and
 * (PC-98 only) select the DMA cache-flush workarounds needed for the
 * detected CPU.
 */
void
initializecpucache(void)
{

        /*
         * CPUID with %eax = 1, %ebx returns
         * Bits 15-8: CLFLUSH line size
         *      (Value * 8 = cache line size in bytes)
         */
        if ((cpu_feature & CPUID_CLFSH) != 0)
                cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
        /*
         * XXXKIB: (temporary) hack to work around traps generated
         * when CLFLUSHing APIC register window under virtualization
         * environments.  These environments tend to disable the
         * CPUID_SS feature even though the native CPU supports it.
         */
        TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
        if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1) {
                cpu_feature &= ~CPUID_CLFSH;
                cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
        }
        /*
         * The kernel's use of CLFLUSH{,OPT} can be disabled manually
         * by setting the hw.clflush_disable tunable.
         */
        if (hw_clflush_disable == 1) {
                cpu_feature &= ~CPUID_CLFSH;
                cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
        }

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
        /*
         * OS should flush L1 cache by itself because no PC-98 supports
         * non-Intel CPUs.  Use wbinvd instruction before DMA transfer
         * when need_pre_dma_flush = 1, use invd instruction after DMA
         * transfer when need_post_dma_flush = 1.  If your CPU upgrade
         * product supports hardware cache control, you can add the
         * CPU_UPGRADE_HW_CACHE option in your kernel configuration file.
         * This option eliminates unneeded cache flush instruction(s).
         */
        if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
                switch (cpu) {
#ifdef I486_CPU
                case CPU_486DLC:
                        need_post_dma_flush = 1;
                        break;
                case CPU_M1SC:
                        need_pre_dma_flush = 1;
                        break;
                case CPU_CY486DX:
                        need_pre_dma_flush = 1;
#ifdef CPU_I486_ON_386
                        need_post_dma_flush = 1;
#endif
                        break;
#endif
                default:
                        break;
                }
        } else if (cpu_vendor_id == CPU_VENDOR_AMD) {
                switch (cpu_id & 0xFF0) {
                case 0x470:             /* Enhanced Am486DX2 WB */
                case 0x490:             /* Enhanced Am486DX4 WB */
                case 0x4F0:             /* Am5x86 WB */
                        need_pre_dma_flush = 1;
                        break;
                }
        } else if (cpu_vendor_id == CPU_VENDOR_IBM) {
                need_post_dma_flush = 1;
        } else {
#ifdef CPU_I486_ON_386
                need_pre_dma_flush = 1;
#endif
        }
#endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
}
  888 
  889 #if defined(I586_CPU) && defined(CPU_WT_ALLOC)
/*
 * Enable the write allocate feature of AMD processors.
 * The following three functions require the Maxmem variable to be set.
 */
  894 static void
  895 enable_K5_wt_alloc(void)
  896 {
  897         u_int64_t       msr;
  898         register_t      saveintr;
  899 
  900         /*
  901          * Write allocate is supported only on models 1, 2, and 3, with
  902          * a stepping of 4 or greater.
  903          */
  904         if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
  905                 saveintr = intr_disable();
  906                 msr = rdmsr(0x83);              /* HWCR */
  907                 wrmsr(0x83, msr & !(0x10));
  908 
  909                 /*
  910                  * We have to tell the chip where the top of memory is,
  911                  * since video cards could have frame bufferes there,
  912                  * memory-mapped I/O could be there, etc.
  913                  */
  914                 if(Maxmem > 0)
  915                   msr = Maxmem / 16;
  916                 else
  917                   msr = 0;
  918                 msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
  919 #ifdef PC98
  920                 if (!(inb(0x43b) & 4)) {
  921                         wrmsr(0x86, 0x0ff00f0);
  922                         msr |= AMD_WT_ALLOC_PRE;
  923                 }
  924 #else
  925                 /*
  926                  * There is no way to know wheter 15-16M hole exists or not. 
  927                  * Therefore, we disable write allocate for this range.
  928                  */
  929                         wrmsr(0x86, 0x0ff00f0);
  930                         msr |= AMD_WT_ALLOC_PRE;
  931 #endif
  932                 wrmsr(0x85, msr);
  933 
  934                 msr=rdmsr(0x83);
  935                 wrmsr(0x83, msr|0x10); /* enable write allocate */
  936                 intr_restore(saveintr);
  937         }
  938 }
  939 
/*
 * Enable the write allocate feature of the original AMD-K6 by
 * programming the Write Handling Control Register (WHCR, MSR
 * 0xc0000082): bits 7:1 hold the write allocate limit in 4M units,
 * bit 0 skips the 15-16M memory hole.  Requires the Maxmem variable
 * to be set.  Runs with interrupts disabled and flushes the caches
 * before reprogramming.
 */
static void
enable_K6_wt_alloc(void)
{
        quad_t  size;
        u_int64_t       whcr;
        register_t      saveintr;

        saveintr = intr_disable();
        wbinvd();

#ifdef CPU_DISABLE_CACHE
        /*
         * Certain K6-2 boxes become unstable when write allocation is
         * enabled.
         */
        /*
         * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
         * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
         * All other bits in TR12 have no effect on the processor's operation.
         * The I/O Trap Restart function (bit 9 of TR12) is always enabled
         * on the AMD-K6.
         */
        wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
        /* Don't assume that memory size is aligned with 4M. */
        if (Maxmem > 0)
          size = ((Maxmem >> 8) + 3) >> 2;
        else
          size = 0;

        /* Limit is 508M bytes (0x7f units of 4M). */
        if (size > 0x7f)
                size = 0x7f;
        whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);

#if defined(PC98) || defined(NO_MEMORY_HOLE)
        if (whcr & (0x7fLL << 1)) {
#ifdef PC98
                /*
                 * If bit 2 of port 0x43b is 0, disable write allocate for
                 * the 15-16M range.
                 */
                if (!(inb(0x43b) & 4))
                        whcr &= ~0x0001LL;
                else
#endif
                        whcr |=  0x0001LL;
        }
#else
        /*
         * There is no way to know whether the 15-16M hole exists or not.
         * Therefore, we disable write allocate for this range.
         */
        whcr &= ~0x0001LL;
#endif
        wrmsr(0x0c0000082, whcr);

        intr_restore(saveintr);
}
  999 
 1000 static void
 1001 enable_K6_2_wt_alloc(void)
 1002 {
 1003         quad_t  size;
 1004         u_int64_t       whcr;
 1005         register_t      saveintr;
 1006 
 1007         saveintr = intr_disable();
 1008         wbinvd();
 1009 
 1010 #ifdef CPU_DISABLE_CACHE
 1011         /*
 1012          * Certain K6-2 box becomes unstable when write allocation is
 1013          * enabled.
 1014          */
 1015         /*
 1016          * The AMD-K6 processer provides the 64-bit Test Register 12(TR12),
 1017          * but only the Cache Inhibit(CI) (bit 3 of TR12) is suppported.
 1018          * All other bits in TR12 have no effect on the processer's operation.
 1019          * The I/O Trap Restart function (bit 9 of TR12) is always enabled
 1020          * on the AMD-K6.
 1021          */
 1022         wrmsr(0x0000000e, (u_int64_t)0x0008);
 1023 #endif
 1024         /* Don't assume that memory size is aligned with 4M. */
 1025         if (Maxmem > 0)
 1026           size = ((Maxmem >> 8) + 3) >> 2;
 1027         else
 1028           size = 0;
 1029 
 1030         /* Limit is 4092M bytes. */
 1031         if (size > 0x3fff)
 1032                 size = 0x3ff;
 1033         whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);
 1034 
 1035 #if defined(PC98) || defined(NO_MEMORY_HOLE)
 1036         if (whcr & (0x3ffLL << 22)) {
 1037 #ifdef PC98
 1038                 /*
 1039                  * If bit 2 of port 0x43b is 0, disable wrte allocate for the
 1040                  * 15-16M range.
 1041                  */
 1042                 if (!(inb(0x43b) & 4))
 1043                         whcr &= ~(1LL << 16);
 1044                 else
 1045 #endif
 1046                         whcr |=  1LL << 16;
 1047         }
 1048 #else
 1049         /*
 1050          * There is no way to know wheter 15-16M hole exists or not. 
 1051          * Therefore, we disable write allocate for this range.
 1052          */
 1053         whcr &= ~(1LL << 16);
 1054 #endif
 1055         wrmsr(0x0c0000082, whcr);
 1056 
 1057         intr_restore(saveintr);
 1058 }
#endif /* I586_CPU && CPU_WT_ALLOC */
 1060 
 1061 #include "opt_ddb.h"
 1062 #ifdef DDB
 1063 #include <ddb/ddb.h>
 1064 
 1065 DB_SHOW_COMMAND(cyrixreg, cyrixreg)
 1066 {
 1067         register_t saveintr;
 1068         u_int   cr0;
 1069         u_char  ccr1, ccr2, ccr3;
 1070         u_char  ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;
 1071 
 1072         cr0 = rcr0();
 1073         if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
 1074                 saveintr = intr_disable();
 1075 
 1076 
 1077                 if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
 1078                         ccr0 = read_cyrix_reg(CCR0);
 1079                 }
 1080                 ccr1 = read_cyrix_reg(CCR1);
 1081                 ccr2 = read_cyrix_reg(CCR2);
 1082                 ccr3 = read_cyrix_reg(CCR3);
 1083                 if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
 1084                         write_cyrix_reg(CCR3, CCR3_MAPEN0);
 1085                         ccr4 = read_cyrix_reg(CCR4);
 1086                         if ((cpu == CPU_M1) || (cpu == CPU_M2))
 1087                                 ccr5 = read_cyrix_reg(CCR5);
 1088                         else
 1089                                 pcr0 = read_cyrix_reg(PCR0);
 1090                         write_cyrix_reg(CCR3, ccr3);            /* Restore CCR3. */
 1091                 }
 1092                 intr_restore(saveintr);
 1093 
 1094                 if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
 1095                         printf("CCR0=%x, ", (u_int)ccr0);
 1096 
 1097                 printf("CCR1=%x, CCR2=%x, CCR3=%x",
 1098                         (u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
 1099                 if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
 1100                         printf(", CCR4=%x, ", (u_int)ccr4);
 1101                         if (cpu == CPU_M1SC)
 1102                                 printf("PCR0=%x\n", pcr0);
 1103                         else
 1104                                 printf("CCR5=%x\n", ccr5);
 1105                 }
 1106         }
 1107         printf("CR0=%x\n", cr0);
 1108 }
 1109 #endif /* DDB */

Cache object: c521e870fb5572893904604e635e1f1b


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.