
FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/initcpu.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) KATO Takenori, 1997, 1998.
    5  *
    6  * All rights reserved.  Unpublished rights reserved under the copyright
    7  * laws of Japan.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  *
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer as
   15  *    the first lines of this file unmodified.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   30  */
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD$");
   34 
   35 #include "opt_cpu.h"
   36 
   37 #include <sys/param.h>
   38 #include <sys/kernel.h>
   39 #include <sys/systm.h>
   40 #include <sys/sysctl.h>
   41 
   42 #include <machine/cputypes.h>
   43 #include <machine/md_var.h>
   44 #include <machine/psl.h>
   45 #include <machine/specialreg.h>
   46 
   47 #include <vm/vm.h>
   48 #include <vm/pmap.h>
   49 
   50 #ifdef I486_CPU
   51 static void init_5x86(void);
   52 static void init_bluelightning(void);
   53 static void init_486dlc(void);
   54 static void init_cy486dx(void);
   55 #ifdef CPU_I486_ON_386
   56 static void init_i486_on_386(void);
   57 #endif
   58 static void init_6x86(void);
   59 #endif /* I486_CPU */
   60 
   61 #if defined(I586_CPU) && defined(CPU_WT_ALLOC)
   62 static void     enable_K5_wt_alloc(void);
   63 static void     enable_K6_wt_alloc(void);
   64 static void     enable_K6_2_wt_alloc(void);
   65 #endif
   66 
   67 #ifdef I686_CPU
   68 static void     init_6x86MX(void);
   69 static void     init_ppro(void);
   70 static void     init_mendocino(void);
   71 #endif
   72 
   73 static int      hw_instruction_sse;
   74 SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
   75     &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
   76 /*
   77  * -1: automatic (default)
    78  *  0: keep CLFLUSH enabled
    79  *  1: force-disable CLFLUSH
   80  */
   81 static int      hw_clflush_disable = -1;
   82 
   83 u_int   cyrix_did;              /* Device ID of Cyrix CPU */
   84 
   85 #ifdef I486_CPU
   86 /*
   87  * IBM Blue Lightning
   88  */
   89 static void
   90 init_bluelightning(void)
   91 {
   92         register_t saveintr;
   93 
   94         saveintr = intr_disable();
   95 
   96         load_cr0(rcr0() | CR0_CD | CR0_NW);
   97         invd();
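               /*
                * Setting CR0_CD (cache disable) and CR0_NW (not
                * write-through) and invalidating the cache keep it
                * quiescent while the configuration MSRs below are
                * programmed.
                */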
   98 
   99 #ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
  100         wrmsr(0x1000, 0x9c92LL);        /* FP operand can be cacheable on Cyrix FPU */
  101 #else
  102         wrmsr(0x1000, 0x1c92LL);        /* Intel FPU */
  103 #endif
  104         /* Enables 13MB and 0-640KB cache. */
  105         wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
  106 #ifdef CPU_BLUELIGHTNING_3X
  107         wrmsr(0x1002, 0x04000000LL);    /* Enables triple-clock mode. */
  108 #else
  109         wrmsr(0x1002, 0x03000000LL);    /* Enables double-clock mode. */
  110 #endif
  111 
  112         /* Enable caching in CR0. */
  113         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  114         invd();
  115         intr_restore(saveintr);
  116 }
  117 
  118 /*
  119  * Cyrix 486SLC/DLC/SR/DR series
  120  */
  121 static void
  122 init_486dlc(void)
  123 {
  124         register_t saveintr;
  125         u_char  ccr0;
  126 
  127         saveintr = intr_disable();
  128         invd();
  129 
  130         ccr0 = read_cyrix_reg(CCR0);
  131 #ifndef CYRIX_CACHE_WORKS
  132         ccr0 |= CCR0_NC1 | CCR0_BARB;
  133         write_cyrix_reg(CCR0, ccr0);
  134         invd();
  135 #else
  136         ccr0 &= ~CCR0_NC0;
  137 #ifndef CYRIX_CACHE_REALLY_WORKS
  138         ccr0 |= CCR0_NC1 | CCR0_BARB;
  139 #else
  140         ccr0 |= CCR0_NC1;
  141 #endif
  142 #ifdef CPU_DIRECT_MAPPED_CACHE
  143         ccr0 |= CCR0_CO;                        /* Direct mapped mode. */
  144 #endif
  145         write_cyrix_reg(CCR0, ccr0);
  146 
  147         /* Clear non-cacheable region. */
  148         write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
  149         write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
  150         write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
  151         write_cyrix_reg(NCR4+2, NCR_SIZE_0K);
  152 
  153         write_cyrix_reg(0, 0);  /* dummy write */
  154 
  155         /* Enable caching in CR0. */
  156         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  157         invd();
  158 #endif /* !CYRIX_CACHE_WORKS */
  159         intr_restore(saveintr);
  160 }
  161 
  162 
  163 /*
  164  * Cyrix 486S/DX series
  165  */
  166 static void
  167 init_cy486dx(void)
  168 {
  169         register_t saveintr;
  170         u_char  ccr2;
  171 
  172         saveintr = intr_disable();
  173         invd();
  174 
  175         ccr2 = read_cyrix_reg(CCR2);
  176 #ifdef CPU_SUSP_HLT
  177         ccr2 |= CCR2_SUSP_HLT;
  178 #endif
  179 
  180         write_cyrix_reg(CCR2, ccr2);
  181         intr_restore(saveintr);
  182 }
  183 
  184 
  185 /*
  186  * Cyrix 5x86
  187  */
  188 static void
  189 init_5x86(void)
  190 {
  191         register_t saveintr;
  192         u_char  ccr2, ccr3, ccr4, pcr0;
  193 
  194         saveintr = intr_disable();
  195 
  196         load_cr0(rcr0() | CR0_CD | CR0_NW);
  197         wbinvd();
  198 
  199         (void)read_cyrix_reg(CCR3);             /* dummy */
  200 
  201         /* Initialize CCR2. */
  202         ccr2 = read_cyrix_reg(CCR2);
  203         ccr2 |= CCR2_WB;
  204 #ifdef CPU_SUSP_HLT
  205         ccr2 |= CCR2_SUSP_HLT;
  206 #else
  207         ccr2 &= ~CCR2_SUSP_HLT;
  208 #endif
  209         ccr2 |= CCR2_WT1;
  210         write_cyrix_reg(CCR2, ccr2);
  211 
  212         /* Initialize CCR4. */
  213         ccr3 = read_cyrix_reg(CCR3);
  214         write_cyrix_reg(CCR3, CCR3_MAPEN0);
  215 
  216         ccr4 = read_cyrix_reg(CCR4);
  217         ccr4 |= CCR4_DTE;
  218         ccr4 |= CCR4_MEM;
  219 #ifdef CPU_FASTER_5X86_FPU
  220         ccr4 |= CCR4_FASTFPE;
  221 #else
  222         ccr4 &= ~CCR4_FASTFPE;
  223 #endif
  224         ccr4 &= ~CCR4_IOMASK;
  225         /********************************************************************
  226          * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time
   227          * should be 0 for the errata fix.
  228          ********************************************************************/
  229 #ifdef CPU_IORT
  230         ccr4 |= CPU_IORT & CCR4_IOMASK;
  231 #endif
  232         write_cyrix_reg(CCR4, ccr4);
  233 
  234         /* Initialize PCR0. */
  235         /****************************************************************
  236          * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
  237          * BTB_EN might make your system unstable.
  238          ****************************************************************/
  239         pcr0 = read_cyrix_reg(PCR0);
  240 #ifdef CPU_RSTK_EN
  241         pcr0 |= PCR0_RSTK;
  242 #else
  243         pcr0 &= ~PCR0_RSTK;
  244 #endif
  245 #ifdef CPU_BTB_EN
  246         pcr0 |= PCR0_BTB;
  247 #else
  248         pcr0 &= ~PCR0_BTB;
  249 #endif
  250 #ifdef CPU_LOOP_EN
  251         pcr0 |= PCR0_LOOP;
  252 #else
  253         pcr0 &= ~PCR0_LOOP;
  254 #endif
  255 
  256         /****************************************************************
   257          * WARNING: if you use a memory-mapped I/O device, don't use the
   258          * CPU_DISABLE_5X86_LSSER option, which may reorder memory-mapped
   259          * I/O accesses.
   260          * IF YOUR MOTHERBOARD HAS A PCI BUS, DON'T DISABLE LSSER.
  261          ****************************************************************/
  262 #ifdef CPU_DISABLE_5X86_LSSER
  263         pcr0 &= ~PCR0_LSSER;
  264 #else
  265         pcr0 |= PCR0_LSSER;
  266 #endif
  267         write_cyrix_reg(PCR0, pcr0);
  268 
  269         /* Restore CCR3. */
  270         write_cyrix_reg(CCR3, ccr3);
  271 
  272         (void)read_cyrix_reg(0x80);             /* dummy */
  273 
  274         /* Unlock NW bit in CR0. */
  275         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
  276         load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0, NW = 1 */
  277         /* Lock NW bit in CR0. */
  278         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);
  279 
  280         intr_restore(saveintr);
  281 }
  282 
  283 #ifdef CPU_I486_ON_386
  284 /*
   285  * There are i486-based upgrade products for i386 machines.
   286  * In this case, the BIOS doesn't enable the CPU cache.
  287  */
  288 static void
  289 init_i486_on_386(void)
  290 {
  291         register_t saveintr;
  292 
  293         saveintr = intr_disable();
  294 
  295         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0, NW = 0 */
  296 
  297         intr_restore(saveintr);
  298 }
  299 #endif
  300 
  301 /*
  302  * Cyrix 6x86
  303  *
  304  * XXX - What should I do here?  Please let me know.
  305  */
  306 static void
  307 init_6x86(void)
  308 {
  309         register_t saveintr;
  310         u_char  ccr3, ccr4;
  311 
  312         saveintr = intr_disable();
  313 
  314         load_cr0(rcr0() | CR0_CD | CR0_NW);
  315         wbinvd();
  316 
  317         /* Initialize CCR0. */
  318         write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);
  319 
  320         /* Initialize CCR1. */
  321 #ifdef CPU_CYRIX_NO_LOCK
  322         write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
  323 #else
  324         write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
  325 #endif
  326 
  327         /* Initialize CCR2. */
  328 #ifdef CPU_SUSP_HLT
  329         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
  330 #else
  331         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
  332 #endif
  333 
  334         ccr3 = read_cyrix_reg(CCR3);
  335         write_cyrix_reg(CCR3, CCR3_MAPEN0);
  336 
  337         /* Initialize CCR4. */
  338         ccr4 = read_cyrix_reg(CCR4);
  339         ccr4 |= CCR4_DTE;
  340         ccr4 &= ~CCR4_IOMASK;
  341 #ifdef CPU_IORT
  342         write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
  343 #else
  344         write_cyrix_reg(CCR4, ccr4 | 7);
  345 #endif
  346 
  347         /* Initialize CCR5. */
  348 #ifdef CPU_WT_ALLOC
  349         write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
  350 #endif
  351 
  352         /* Restore CCR3. */
  353         write_cyrix_reg(CCR3, ccr3);
  354 
  355         /* Unlock NW bit in CR0. */
  356         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
  357 
  358         /*
   359          * Earlier revisions of the 6x86 CPU could crash the system if
   360          * the L1 cache is in write-back mode.
  361          */
  362         if ((cyrix_did & 0xff00) > 0x1600)
  363                 load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  364         else {
  365                 /* Revision 2.6 and lower. */
  366 #ifdef CYRIX_CACHE_REALLY_WORKS
  367                 load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  368 #else
  369                 load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0 and NW = 1 */
  370 #endif
  371         }
  372 
  373         /* Lock NW bit in CR0. */
  374         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);
  375 
  376         intr_restore(saveintr);
  377 }
  378 #endif /* I486_CPU */
  379 
  380 #ifdef I586_CPU
  381 /*
  382  * Rise mP6
  383  */
  384 static void
  385 init_rise(void)
  386 {
  387 
  388         /*
  389          * The CMPXCHG8B instruction is always available but hidden.
  390          */
  391         cpu_feature |= CPUID_CX8;
  392 }
  393 
  394 /*
  395  * IDT WinChip C6/2/2A/2B/3
  396  *
  397  * http://www.centtech.com/winchip_bios_writers_guide_v4_0.pdf
  398  */
  399 static void
  400 init_winchip(void)
  401 {
  402         u_int regs[4];
  403         uint64_t fcr;
  404 
  405         fcr = rdmsr(0x0107);
  406 
  407         /*
  408          * Set ECX8, DSMC, DTLOCK/EDCTLB, EMMX, and ERETSTK and clear DPDC.
  409          */
  410         fcr |= (1 << 1) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 16);
  411         fcr &= ~(1ULL << 11);
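               /*
                * A note on the magic numbers: matching the order of the
                * comment above, bit 1 is presumably ECX8, bit 7 DSMC,
                * bit 8 DTLOCK/EDCTLB, bit 9 EMMX and bit 16 ERETSTK,
                * while bit 11 is DPDC.
                */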
  412 
  413         /*
  414          * Additionally, set EBRPRED, E2MMX and EAMD3D for WinChip 2 and 3.
  415          */
  416         if (CPUID_TO_MODEL(cpu_id) >= 8)
  417                 fcr |= (1 << 12) | (1 << 19) | (1 << 20);
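               /*
                * Likewise, following the comment's order, bit 12 is
                * presumably EBRPRED, bit 19 E2MMX and bit 20 EAMD3D.
                */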
  418 
  419         wrmsr(0x0107, fcr);
  420         do_cpuid(1, regs);
  421         cpu_feature = regs[3];
  422 }
  423 #endif
  424 
  425 #ifdef I686_CPU
  426 /*
  427  * Cyrix 6x86MX (code-named M2)
  428  *
  429  * XXX - What should I do here?  Please let me know.
  430  */
  431 static void
  432 init_6x86MX(void)
  433 {
  434         register_t saveintr;
  435         u_char  ccr3, ccr4;
  436 
  437         saveintr = intr_disable();
  438 
  439         load_cr0(rcr0() | CR0_CD | CR0_NW);
  440         wbinvd();
  441 
  442         /* Initialize CCR0. */
  443         write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);
  444 
  445         /* Initialize CCR1. */
  446 #ifdef CPU_CYRIX_NO_LOCK
  447         write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
  448 #else
  449         write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
  450 #endif
  451 
  452         /* Initialize CCR2. */
  453 #ifdef CPU_SUSP_HLT
  454         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
  455 #else
  456         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
  457 #endif
  458 
  459         ccr3 = read_cyrix_reg(CCR3);
  460         write_cyrix_reg(CCR3, CCR3_MAPEN0);
  461 
  462         /* Initialize CCR4. */
  463         ccr4 = read_cyrix_reg(CCR4);
  464         ccr4 &= ~CCR4_IOMASK;
  465 #ifdef CPU_IORT
  466         write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
  467 #else
  468         write_cyrix_reg(CCR4, ccr4 | 7);
  469 #endif
  470 
  471         /* Initialize CCR5. */
  472 #ifdef CPU_WT_ALLOC
  473         write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
  474 #endif
  475 
  476         /* Restore CCR3. */
  477         write_cyrix_reg(CCR3, ccr3);
  478 
  479         /* Unlock NW bit in CR0. */
  480         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
  481 
  482         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  483 
  484         /* Lock NW bit in CR0. */
  485         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);
  486 
  487         intr_restore(saveintr);
  488 }
  489 
  490 static int ppro_apic_used = -1;
  491 
  492 static void
  493 init_ppro(void)
  494 {
  495         u_int64_t       apicbase;
  496 
  497         /*
   498          * The local APIC should be disabled if it is not going to be used.
  499          */
  500         if (ppro_apic_used != 1) {
  501                 apicbase = rdmsr(MSR_APICBASE);
  502                 apicbase &= ~APICBASE_ENABLED;
  503                 wrmsr(MSR_APICBASE, apicbase);
  504                 ppro_apic_used = 0;
  505         }
  506 }
  507 
  508 /*
  509  * If the local APIC is going to be used after being disabled above,
  510  * re-enable it and don't disable it in the future.
  511  */
  512 void
  513 ppro_reenable_apic(void)
  514 {
  515         u_int64_t       apicbase;
  516 
  517         if (ppro_apic_used == 0) {
  518                 apicbase = rdmsr(MSR_APICBASE);
  519                 apicbase |= APICBASE_ENABLED;
  520                 wrmsr(MSR_APICBASE, apicbase);
  521                 ppro_apic_used = 1;
  522         }
  523 }
  524 
  525 /*
  526  * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
  527  * L2 cache).
  528  */
  529 static void
  530 init_mendocino(void)
  531 {
  532 #ifdef CPU_PPRO2CELERON
  533         register_t      saveintr;
  534         u_int64_t       bbl_cr_ctl3;
  535 
  536         saveintr = intr_disable();
  537 
  538         load_cr0(rcr0() | CR0_CD | CR0_NW);
  539         wbinvd();
  540 
  541         bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);
  542 
  543         /* If the L2 cache is configured, do nothing. */
  544         if (!(bbl_cr_ctl3 & 1)) {
  545                 bbl_cr_ctl3 = 0x134052bLL;
  546 
  547                 /* Set L2 Cache Latency (Default: 5). */
  548 #ifdef  CPU_CELERON_L2_LATENCY
   549 #if CPU_CELERON_L2_LATENCY > 15
   550 #error invalid CPU_CELERON_L2_LATENCY.
   551 #endif
   552                 bbl_cr_ctl3 |= CPU_CELERON_L2_LATENCY << 1;
  553 #else
  554                 bbl_cr_ctl3 |= 5 << 1;
  555 #endif
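                       /*
                        * The latency value appears to occupy a 4-bit
                        * field starting at bit 1 of BBL_CR_CTL3 (hence
                        * the << 1 and the > 15 check above); this is
                        * inferred from the code, not from documentation.
                        */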
  556                 wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
  557         }
  558 
  559         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
  560         intr_restore(saveintr);
  561 #endif /* CPU_PPRO2CELERON */
  562 }
  563 
  564 /*
  565  * Initialize special VIA features
  566  */
  567 static void
  568 init_via(void)
  569 {
  570         u_int regs[4], val;
  571         uint64_t fcr;
  572 
  573         /*
  574          * Explicitly enable CX8 and PGE on C3.
  575          *
  576          * http://www.via.com.tw/download/mainboards/6/13/VIA_C3_EBGA%20datasheet110.pdf
  577          */
  578         if (CPUID_TO_MODEL(cpu_id) <= 9)
  579                 fcr = (1 << 1) | (1 << 7);
  580         else
  581                 fcr = 0;
  582 
  583         /*
  584          * Check extended CPUID for PadLock features.
  585          *
  586          * http://www.via.com.tw/en/downloads/whitepapers/initiatives/padlock/programming_guide.pdf
  587          */
  588         do_cpuid(0xc0000000, regs);
  589         if (regs[0] >= 0xc0000001) {
  590                 do_cpuid(0xc0000001, regs);
  591                 val = regs[3];
  592         } else
  593                 val = 0;
  594 
  595         /* Enable RNG if present. */
  596         if ((val & VIA_CPUID_HAS_RNG) != 0) {
  597                 via_feature_rng = VIA_HAS_RNG;
  598                 wrmsr(0x110B, rdmsr(0x110B) | VIA_CPUID_DO_RNG);
  599         }
  600 
  601         /* Enable PadLock if present. */
  602         if ((val & VIA_CPUID_HAS_ACE) != 0)
  603                 via_feature_xcrypt |= VIA_HAS_AES;
  604         if ((val & VIA_CPUID_HAS_ACE2) != 0)
  605                 via_feature_xcrypt |= VIA_HAS_AESCTR;
  606         if ((val & VIA_CPUID_HAS_PHE) != 0)
  607                 via_feature_xcrypt |= VIA_HAS_SHA;
  608         if ((val & VIA_CPUID_HAS_PMM) != 0)
  609                 via_feature_xcrypt |= VIA_HAS_MM;
  610         if (via_feature_xcrypt != 0)
  611                 fcr |= 1 << 28;
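               /*
                * FCR bit 28 is set only when at least one PadLock unit
                * was detected above; it presumably acts as the global
                * enable for those units.
                */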
  612 
  613         wrmsr(0x1107, rdmsr(0x1107) | fcr);
  614 }
  615 
  616 #endif /* I686_CPU */
  617 
  618 #if defined(I586_CPU) || defined(I686_CPU)
  619 static void
  620 init_transmeta(void)
  621 {
   622         u_int regs[4];          /* do_cpuid() fills all four words. */
  623 
  624         /* Expose all hidden features. */
  625         wrmsr(0x80860004, rdmsr(0x80860004) | ~0UL);
  626         do_cpuid(1, regs);
  627         cpu_feature = regs[3];
  628 }
  629 #endif
  630 
  631 /*
  632  * The value for the TSC_AUX MSR and rdtscp/rdpid on the invoking CPU.
  633  *
   634  * The caller should prevent CPU migration.
  635  */
  636 u_int
  637 cpu_auxmsr(void)
  638 {
  639         KASSERT((read_eflags() & PSL_I) == 0, ("context switch possible"));
  640         return (PCPU_GET(cpuid));
  641 }
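
       /*
        * initializecpu() below writes this value to MSR_TSC_AUX whenever
        * RDTSCP or RDPID is available, so that rdtscp/rdpid report the id
        * of the CPU they execute on.
        */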
  642 
  643 extern int elf32_nxstack;
  644 
  645 void
  646 initializecpu(void)
  647 {
  648 
  649         switch (cpu) {
  650 #ifdef I486_CPU
  651         case CPU_BLUE:
  652                 init_bluelightning();
  653                 break;
  654         case CPU_486DLC:
  655                 init_486dlc();
  656                 break;
  657         case CPU_CY486DX:
  658                 init_cy486dx();
  659                 break;
  660         case CPU_M1SC:
  661                 init_5x86();
  662                 break;
  663 #ifdef CPU_I486_ON_386
  664         case CPU_486:
  665                 init_i486_on_386();
  666                 break;
  667 #endif
  668         case CPU_M1:
  669                 init_6x86();
  670                 break;
  671 #endif /* I486_CPU */
  672 #ifdef I586_CPU
  673         case CPU_586:
  674                 switch (cpu_vendor_id) {
  675                 case CPU_VENDOR_AMD:
  676 #ifdef CPU_WT_ALLOC
  677                         if (((cpu_id & 0x0f0) > 0) &&
  678                             ((cpu_id & 0x0f0) < 0x60) &&
  679                             ((cpu_id & 0x00f) > 3))
  680                                 enable_K5_wt_alloc();
  681                         else if (((cpu_id & 0x0f0) > 0x80) ||
  682                             (((cpu_id & 0x0f0) == 0x80) &&
  683                                 (cpu_id & 0x00f) > 0x07))
  684                                 enable_K6_2_wt_alloc();
  685                         else if ((cpu_id & 0x0f0) > 0x50)
  686                                 enable_K6_wt_alloc();
  687 #endif
  688                         if ((cpu_id & 0xf0) == 0xa0)
  689                                 /*
  690                                  * Make sure the TSC runs through
   691                                  * suspension; otherwise we can't use
   692                                  * it as a timecounter.
  693                                  */
  694                                 wrmsr(0x1900, rdmsr(0x1900) | 0x20ULL);
  695                         break;
  696                 case CPU_VENDOR_CENTAUR:
  697                         init_winchip();
  698                         break;
  699                 case CPU_VENDOR_TRANSMETA:
  700                         init_transmeta();
  701                         break;
  702                 case CPU_VENDOR_RISE:
  703                         init_rise();
  704                         break;
  705                 }
  706                 break;
  707 #endif
  708 #ifdef I686_CPU
  709         case CPU_M2:
  710                 init_6x86MX();
  711                 break;
  712         case CPU_686:
  713                 switch (cpu_vendor_id) {
  714                 case CPU_VENDOR_INTEL:
  715                         switch (cpu_id & 0xff0) {
  716                         case 0x610:
  717                                 init_ppro();
  718                                 break;
  719                         case 0x660:
  720                                 init_mendocino();
  721                                 break;
  722                         }
  723                         break;
  724                 case CPU_VENDOR_AMD:
  725 #ifdef CPU_ATHLON_SSE_HACK
  726                         /*
  727                          * Sometimes the BIOS doesn't enable SSE instructions.
  728                          * According to AMD document 20734, the mobile
  729                          * Duron, the (mobile) Athlon 4 and the Athlon MP
   730          * support SSE.  These correspond to cpu_id 0x66X,
   731          * 0x67X, or 0x68X, the values checked below.
  732                          */
  733                         if ((cpu_feature & CPUID_XMM) == 0 &&
  734                             ((cpu_id & ~0xf) == 0x660 ||
  735                              (cpu_id & ~0xf) == 0x670 ||
  736                              (cpu_id & ~0xf) == 0x680)) {
  737                                 u_int regs[4];
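                                       /*
                                        * Clearing bit 15 of HWCR below
                                        * evidently unhides the SSE bit;
                                        * the feature flags are then
                                        * re-read via CPUID.
                                        */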
  738                                 wrmsr(MSR_HWCR, rdmsr(MSR_HWCR) & ~0x08000);
  739                                 do_cpuid(1, regs);
  740                                 cpu_feature = regs[3];
  741                         }
  742 #endif
  743                         /*
  744                          * Detect C1E that breaks APIC.  See comment in
  745                          * amd64/initcpu.c.
  746                          */
  747                         if ((CPUID_TO_FAMILY(cpu_id) == 0xf ||
  748                             CPUID_TO_FAMILY(cpu_id) == 0x10) &&
  749                             (cpu_feature2 & CPUID2_HV) == 0)
  750                                 cpu_amdc1e_bug = 1;
  751                         break;
  752                 case CPU_VENDOR_CENTAUR:
  753                         init_via();
  754                         break;
  755                 case CPU_VENDOR_TRANSMETA:
  756                         init_transmeta();
  757                         break;
  758                 }
  759                 break;
  760 #endif
  761         default:
  762                 break;
  763         }
  764         if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
  765                 load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
  766                 cpu_fxsr = hw_instruction_sse = 1;
  767         }
  768 #if defined(PAE) || defined(PAE_TABLES)
  769         if ((amd_feature & AMDID_NX) != 0) {
  770                 uint64_t msr;
  771 
  772                 msr = rdmsr(MSR_EFER) | EFER_NXE;
  773                 wrmsr(MSR_EFER, msr);
  774                 pg_nx = PG_NX;
  775                 elf32_nxstack = 1;
  776         }
  777 #endif
  778         if ((amd_feature & AMDID_RDTSCP) != 0 ||
  779             (cpu_stdext_feature2 & CPUID_STDEXT2_RDPID) != 0)
  780                 wrmsr(MSR_TSC_AUX, cpu_auxmsr());
  781 }
  782 
  783 void
  784 initializecpucache(void)
  785 {
  786 
  787         /*
  788          * CPUID with %eax = 1, %ebx returns
  789          * Bits 15-8: CLFLUSH line size
  790          *      (Value * 8 = cache line size in bytes)
  791          */
  792         if ((cpu_feature & CPUID_CLFSH) != 0)
  793                 cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
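               /*
                * For example, a CLFLUSH line-size value of 8 in
                * cpu_procinfo yields 8 * 8 = 64-byte cache lines, the
                * common case on modern x86 CPUs.
                */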
  794         /*
  795          * XXXKIB: (temporary) hack to work around traps generated
  796          * when CLFLUSHing APIC register window under virtualization
  797          * environments.  These environments tend to disable the
  798          * CPUID_SS feature even though the native CPU supports it.
  799          */
  800         TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
  801         if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1) {
  802                 cpu_feature &= ~CPUID_CLFSH;
  803                 cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
  804         }
  805         /*
  806          * The kernel's use of CLFLUSH{,OPT} can be disabled manually
  807          * by setting the hw.clflush_disable tunable.
  808          */
  809         if (hw_clflush_disable == 1) {
  810                 cpu_feature &= ~CPUID_CLFSH;
  811                 cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
  812         }
  813 }
  814 
  815 #if defined(I586_CPU) && defined(CPU_WT_ALLOC)
  816 /*
   817  * Enable the write-allocate feature of AMD processors.
   818  * The following functions require the Maxmem variable to be set.
  819  */
  820 static void
  821 enable_K5_wt_alloc(void)
  822 {
  823         u_int64_t       msr;
  824         register_t      saveintr;
  825 
  826         /*
  827          * Write allocate is supported only on models 1, 2, and 3, with
  828          * a stepping of 4 or greater.
  829          */
  830         if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
  831                 saveintr = intr_disable();
  832                 msr = rdmsr(0x83);              /* HWCR */
   833                 wrmsr(0x83, msr & ~0x10);       /* Disable write allocate. */
  834 
  835                 /*
  836                  * We have to tell the chip where the top of memory is,
   837                  * since video cards could have frame buffers there,
  838                  * memory-mapped I/O could be there, etc.
  839                  */
   840                 if (Maxmem > 0)
   841                         msr = Maxmem / 16;
   842                 else
   843                         msr = 0;
  844                 msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
  845 
  846                 /*
   847                  * There is no way to know whether the 15-16M hole exists or not.
  848                  * Therefore, we disable write allocate for this range.
  849                  */
  850                 wrmsr(0x86, 0x0ff00f0);
  851                 msr |= AMD_WT_ALLOC_PRE;
  852                 wrmsr(0x85, msr);
  853 
   854                 msr = rdmsr(0x83);
   855                 wrmsr(0x83, msr | 0x10);        /* Enable write allocate. */
  856                 intr_restore(saveintr);
  857         }
  858 }
  859 
  860 static void
  861 enable_K6_wt_alloc(void)
  862 {
  863         quad_t  size;
  864         u_int64_t       whcr;
  865         register_t      saveintr;
  866 
  867         saveintr = intr_disable();
  868         wbinvd();
  869 
  870 #ifdef CPU_DISABLE_CACHE
  871         /*
   872          * Certain K6-2 boxes become unstable when write allocation is
   873          * enabled.
   874          */
   875         /*
   876          * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
   877          * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
   878          * All other bits in TR12 have no effect on the processor's operation.
  879          * The I/O Trap Restart function (bit 9 of TR12) is always enabled
  880          * on the AMD-K6.
  881          */
  882         wrmsr(0x0000000e, (u_int64_t)0x0008);
  883 #endif
   884         /* Don't assume that the memory size is aligned to 4M. */
   885         if (Maxmem > 0)
   886                 size = ((Maxmem >> 8) + 3) >> 2;
   887         else
   888                 size = 0;
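               /*
                * Derivation, assuming Maxmem counts 4 KB pages: Maxmem >> 8
                * is the memory size in MB, and the round-up plus >> 2
                * converts it to 4 MB units, consistent with the 0x7f cap
                * below (127 * 4 MB = 508 MB).
                */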
  889 
  890         /* Limit is 508M bytes. */
  891         if (size > 0x7f)
  892                 size = 0x7f;
  893         whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);
  894 
  895 #if defined(NO_MEMORY_HOLE)
  896         if (whcr & (0x7fLL << 1))
  897                 whcr |=  0x0001LL;
  898 #else
  899         /*
   900          * There is no way to know whether the 15-16M hole exists or not.
  901          * Therefore, we disable write allocate for this range.
  902          */
  903         whcr &= ~0x0001LL;
  904 #endif
  905         wrmsr(0x0c0000082, whcr);
  906 
  907         intr_restore(saveintr);
  908 }
  909 
  910 static void
  911 enable_K6_2_wt_alloc(void)
  912 {
  913         quad_t  size;
  914         u_int64_t       whcr;
  915         register_t      saveintr;
  916 
  917         saveintr = intr_disable();
  918         wbinvd();
  919 
  920 #ifdef CPU_DISABLE_CACHE
  921         /*
   922          * Certain K6-2 boxes become unstable when write allocation is
   923          * enabled.
   924          */
   925         /*
   926          * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
   927          * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
   928          * All other bits in TR12 have no effect on the processor's operation.
  929          * The I/O Trap Restart function (bit 9 of TR12) is always enabled
  930          * on the AMD-K6.
  931          */
  932         wrmsr(0x0000000e, (u_int64_t)0x0008);
  933 #endif
   934         /* Don't assume that the memory size is aligned to 4M. */
   935         if (Maxmem > 0)
   936                 size = ((Maxmem >> 8) + 3) >> 2;
   937         else
   938                 size = 0;
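               /*
                * As in enable_K6_wt_alloc() above, size ends up in 4 MB
                * units; here the limit field is 10 bits wide, and the
                * 0x3ff cap below matches 1023 * 4 MB = 4092 MB.
                */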
  939 
  940         /* Limit is 4092M bytes. */
   941         if (size > 0x3ff)
  942                 size = 0x3ff;
  943         whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);
  944 
  945 #if defined(NO_MEMORY_HOLE)
  946         if (whcr & (0x3ffLL << 22))
  947                 whcr |=  1LL << 16;
  948 #else
  949         /*
   950          * There is no way to know whether the 15-16M hole exists or not.
  951          * Therefore, we disable write allocate for this range.
  952          */
  953         whcr &= ~(1LL << 16);
  954 #endif
  955         wrmsr(0x0c0000082, whcr);
  956 
  957         intr_restore(saveintr);
  958 }
   959 #endif /* I586_CPU && CPU_WT_ALLOC */
  960 
  961 #include "opt_ddb.h"
  962 #ifdef DDB
  963 #include <ddb/ddb.h>
  964 
  965 DB_SHOW_COMMAND(cyrixreg, cyrixreg)
  966 {
  967         register_t saveintr;
  968         u_int   cr0;
  969         u_char  ccr1, ccr2, ccr3;
  970         u_char  ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;
  971 
  972         cr0 = rcr0();
  973         if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
  974                 saveintr = intr_disable();
  975 
  976 
  977                 if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
  978                         ccr0 = read_cyrix_reg(CCR0);
  979                 }
  980                 ccr1 = read_cyrix_reg(CCR1);
  981                 ccr2 = read_cyrix_reg(CCR2);
  982                 ccr3 = read_cyrix_reg(CCR3);
  983                 if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
  984                         write_cyrix_reg(CCR3, CCR3_MAPEN0);
  985                         ccr4 = read_cyrix_reg(CCR4);
  986                         if ((cpu == CPU_M1) || (cpu == CPU_M2))
  987                                 ccr5 = read_cyrix_reg(CCR5);
  988                         else
  989                                 pcr0 = read_cyrix_reg(PCR0);
  990                         write_cyrix_reg(CCR3, ccr3);            /* Restore CCR3. */
  991                 }
  992                 intr_restore(saveintr);
  993 
  994                 if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
  995                         printf("CCR0=%x, ", (u_int)ccr0);
  996 
  997                 printf("CCR1=%x, CCR2=%x, CCR3=%x",
  998                         (u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
  999                 if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
 1000                         printf(", CCR4=%x, ", (u_int)ccr4);
 1001                         if (cpu == CPU_M1SC)
 1002                                 printf("PCR0=%x\n", pcr0);
 1003                         else
 1004                                 printf("CCR5=%x\n", ccr5);
 1005                 }
 1006         }
 1007         printf("CR0=%x\n", cr0);
 1008 }
 1009 #endif /* DDB */
