
FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/initcpu.c


    1 /*
    2  * Copyright (c) KATO Takenori, 1997, 1998.
    3  * 
    4  * All rights reserved.  Unpublished rights reserved under the copyright
    5  * laws of Japan.
    6  * 
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer as
   13  *    the first lines of this file unmodified.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 
   18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   28  *
   29  * $FreeBSD$
   30  */
   31 
   32 #include "opt_cpu.h"
   33 
   34 #include <sys/param.h>
   35 #include <sys/kernel.h>
   36 #include <sys/systm.h>
   37 #include <sys/sysctl.h>
   38 
   39 #include <machine/cputypes.h>
   40 #include <machine/md_var.h>
   41 #include <machine/specialreg.h>
   42 
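The opt_cpu.h header included above is generated from the kernel configuration, and the #ifdef/#if tests throughout this file react to the options defined there. As a rough illustration only, an i386 kernel configuration might select this code with lines such as the following (option names are taken from the #ifdefs below; whether each one is appropriate depends entirely on the hardware):

cpu		I486_CPU
cpu		I586_CPU
cpu		I686_CPU
options 	CPU_ENABLE_SSE		# let enable_sse() set the CR4 FXSR/XMM bits
options 	CPU_SUSP_HLT		# Cyrix: suspend on HALT
options 	CPU_WT_ALLOC		# AMD K5/K6/K6-2 write allocate support
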
   43 void initializecpu(void);
   44 #if defined(I586_CPU) && defined(CPU_WT_ALLOC)
   45 void    enable_K5_wt_alloc(void);
   46 void    enable_K6_wt_alloc(void);
   47 void    enable_K6_2_wt_alloc(void);
   48 #endif
   49 
   50 #ifdef I486_CPU
   51 static void init_5x86(void);
   52 static void init_bluelightning(void);
   53 static void init_486dlc(void);
   54 static void init_cy486dx(void);
   55 #ifdef CPU_I486_ON_386
   56 static void init_i486_on_386(void);
   57 #endif
   58 static void init_6x86(void);
   59 #endif /* I486_CPU */
   60 
   61 #ifdef I686_CPU
   62 static void     init_6x86MX(void);
   63 static void     init_ppro(void);
   64 static void     init_mendocino(void);
   65 #endif
   66 
   67 static int      hw_instruction_sse;
   68 SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
   69     &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
   70 
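The hw_instruction_sse sysctl declared above is read-only and reports whether enable_sse() found and enabled SSE. A minimal userland sketch that queries it through the standard sysctlbyname(3) interface (the program itself is illustrative and not part of the kernel):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int sse = 0;
	size_t len = sizeof(sse);

	/* Query the value exported by the SYSCTL_INT() above. */
	if (sysctlbyname("hw.instruction_sse", &sse, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("SSE %s\n", sse ? "available" : "not available");
	return (0);
}
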
   71 #ifdef CPU_ENABLE_SSE
   72 u_int   cpu_fxsr;               /* SSE enabled */
   73 #endif
   74 
   75 #ifdef I486_CPU
   76 /*
   77  * IBM Blue Lightning
   78  */
   79 static void
   80 init_bluelightning(void)
   81 {
   82         u_long  eflags;
   83 
   84 #if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
   85         need_post_dma_flush = 1;
   86 #endif
   87 
   88         eflags = read_eflags();
   89         disable_intr();
   90 
   91         load_cr0(rcr0() | CR0_CD | CR0_NW);
   92         invd();
   93 
   94 #ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
   95         wrmsr(0x1000, 0x9c92LL);        /* FP operand can be cacheable on Cyrix FPU */
   96 #else
   97         wrmsr(0x1000, 0x1c92LL);        /* Intel FPU */
   98 #endif
   99         /* Enables 13MB and 0-640KB cache. */
  100         wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
  101 #ifdef CPU_BLUELIGHTNING_3X
  102         wrmsr(0x1002, 0x04000000LL);    /* Enables triple-clock mode. */
  103 #else
  104         wrmsr(0x1002, 0x03000000LL);    /* Enables double-clock mode. */
  105 #endif
  106 
  107         /* Enable caching in CR0. */
  108         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  109         invd();
  110         write_eflags(eflags);
  111 }
  112 
  113 /*
  114  * Cyrix 486SLC/DLC/SR/DR series
  115  */
  116 static void
  117 init_486dlc(void)
  118 {
  119         u_long  eflags;
  120         u_char  ccr0;
  121 
  122         eflags = read_eflags();
  123         disable_intr();
  124         invd();
  125 
  126         ccr0 = read_cyrix_reg(CCR0);
  127 #ifndef CYRIX_CACHE_WORKS
  128         ccr0 |= CCR0_NC1 | CCR0_BARB;
  129         write_cyrix_reg(CCR0, ccr0);
  130         invd();
  131 #else
  132         ccr0 &= ~CCR0_NC0;
  133 #ifndef CYRIX_CACHE_REALLY_WORKS
  134         ccr0 |= CCR0_NC1 | CCR0_BARB;
  135 #else
  136         ccr0 |= CCR0_NC1;
  137 #endif
  138 #ifdef CPU_DIRECT_MAPPED_CACHE
  139         ccr0 |= CCR0_CO;                        /* Direct mapped mode. */
  140 #endif
  141         write_cyrix_reg(CCR0, ccr0);
  142 
  143         /* Clear non-cacheable region. */
  144         write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
  145         write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
  146         write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
  147         write_cyrix_reg(NCR4+2, NCR_SIZE_0K);
  148 
  149         write_cyrix_reg(0, 0);  /* dummy write */
  150 
  151         /* Enable caching in CR0. */
  152         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  153         invd();
  154 #endif /* !CYRIX_CACHE_WORKS */
  155         write_eflags(eflags);
  156 }
  157 
  158 
  159 /*
  160  * Cyrix 486S/DX series
  161  */
  162 static void
  163 init_cy486dx(void)
  164 {
  165         u_long  eflags;
  166         u_char  ccr2;
  167 
  168         eflags = read_eflags();
  169         disable_intr();
  170         invd();
  171 
  172         ccr2 = read_cyrix_reg(CCR2);
  173 #ifdef CPU_SUSP_HLT
  174         ccr2 |= CCR2_SUSP_HLT;
  175 #endif
  176 
  177 #ifdef PC98
  178         /* Enables WB cache interface pin and Lock NW bit in CR0. */
  179         ccr2 |= CCR2_WB | CCR2_LOCK_NW;
  180         /* Unlock NW bit in CR0. */
  181         write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
  182         load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0, NW = 1 */
  183 #endif
  184 
  185         write_cyrix_reg(CCR2, ccr2);
  186         write_eflags(eflags);
  187 }
  188 
  189 
  190 /*
  191  * Cyrix 5x86
  192  */
  193 static void
  194 init_5x86(void)
  195 {
  196         u_long  eflags;
  197         u_char  ccr2, ccr3, ccr4, pcr0;
  198 
  199         eflags = read_eflags();
  200         disable_intr();
  201 
  202         load_cr0(rcr0() | CR0_CD | CR0_NW);
  203         wbinvd();
  204 
  205         (void)read_cyrix_reg(CCR3);             /* dummy */
  206 
  207         /* Initialize CCR2. */
  208         ccr2 = read_cyrix_reg(CCR2);
  209         ccr2 |= CCR2_WB;
  210 #ifdef CPU_SUSP_HLT
  211         ccr2 |= CCR2_SUSP_HLT;
  212 #else
  213         ccr2 &= ~CCR2_SUSP_HLT;
  214 #endif
  215         ccr2 |= CCR2_WT1;
  216         write_cyrix_reg(CCR2, ccr2);
  217 
  218         /* Initialize CCR4. */
  219         ccr3 = read_cyrix_reg(CCR3);
  220         write_cyrix_reg(CCR3, CCR3_MAPEN0);
  221 
  222         ccr4 = read_cyrix_reg(CCR4);
  223         ccr4 |= CCR4_DTE;
  224         ccr4 |= CCR4_MEM;
  225 #ifdef CPU_FASTER_5X86_FPU
  226         ccr4 |= CCR4_FASTFPE;
  227 #else
  228         ccr4 &= ~CCR4_FASTFPE;
  229 #endif
  230         ccr4 &= ~CCR4_IOMASK;
  231         /********************************************************************
  232          * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time
  233          * should be 0 for errata fix.
  234          ********************************************************************/
  235 #ifdef CPU_IORT
  236         ccr4 |= CPU_IORT & CCR4_IOMASK;
  237 #endif
  238         write_cyrix_reg(CCR4, ccr4);
  239 
  240         /* Initialize PCR0. */
  241         /****************************************************************
  242          * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
  243          * BTB_EN might make your system unstable.
  244          ****************************************************************/
  245         pcr0 = read_cyrix_reg(PCR0);
  246 #ifdef CPU_RSTK_EN
  247         pcr0 |= PCR0_RSTK;
  248 #else
  249         pcr0 &= ~PCR0_RSTK;
  250 #endif
  251 #ifdef CPU_BTB_EN
  252         pcr0 |= PCR0_BTB;
  253 #else
  254         pcr0 &= ~PCR0_BTB;
  255 #endif
  256 #ifdef CPU_LOOP_EN
  257         pcr0 |= PCR0_LOOP;
  258 #else
  259         pcr0 &= ~PCR0_LOOP;
  260 #endif
  261 
  262         /****************************************************************
   263          * WARNING: if you use a memory-mapped I/O device, don't use the
   264          * CPU_DISABLE_5X86_LSSER option, which may reorder memory-mapped
   265          * I/O accesses.
   266          * IF YOUR MOTHERBOARD HAS A PCI BUS, DON'T DISABLE LSSER.
  267          ****************************************************************/
  268 #ifdef CPU_DISABLE_5X86_LSSER
  269         pcr0 &= ~PCR0_LSSER;
  270 #else
  271         pcr0 |= PCR0_LSSER;
  272 #endif
  273         write_cyrix_reg(PCR0, pcr0);
  274 
  275         /* Restore CCR3. */
  276         write_cyrix_reg(CCR3, ccr3);
  277 
  278         (void)read_cyrix_reg(0x80);             /* dummy */
  279 
  280         /* Unlock NW bit in CR0. */
  281         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
  282         load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0, NW = 1 */
  283         /* Lock NW bit in CR0. */
  284         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);
  285 
  286         write_eflags(eflags);
  287 }
  288 
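init_5x86() above (and init_6x86()/init_6x86MX() below) all use the same unlock pattern: save CCR3, write CCR3_MAPEN0 to expose the extended configuration registers (CCR4, CCR5, PCR0), modify them, then restore CCR3. A hedged sketch of that pattern as a stand-alone helper, reusing only identifiers that already appear in this file (the helper itself is hypothetical and assumes the caller has already disabled interrupts):

static void
cyrix_update_extended_reg(u_char reg, u_char set, u_char clear)
{
	u_char ccr3, val;

	ccr3 = read_cyrix_reg(CCR3);		/* save CCR3 */
	write_cyrix_reg(CCR3, CCR3_MAPEN0);	/* expose CCR4/CCR5/PCR0 */
	val = read_cyrix_reg(reg);
	val = (val | set) & ~clear;
	write_cyrix_reg(reg, val);
	write_cyrix_reg(CCR3, ccr3);		/* restore CCR3, hiding them again */
}
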
  289 #ifdef CPU_I486_ON_386
  290 /*
   291  * There are i486-based upgrade products for i386 machines.
   292  * In this case, the BIOS does not enable the CPU cache.
  293  */
  294 void
  295 init_i486_on_386(void)
  296 {
  297         u_long  eflags;
  298 
  299 #if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
  300         need_post_dma_flush = 1;
  301 #endif
  302 
  303         eflags = read_eflags();
  304         disable_intr();
  305 
  306         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0, NW = 0 */
  307 
  308         write_eflags(eflags);
  309 }
  310 #endif
  311 
  312 /*
  313  * Cyrix 6x86
  314  *
  315  * XXX - What should I do here?  Please let me know.
  316  */
  317 static void
  318 init_6x86(void)
  319 {
  320         u_long  eflags;
  321         u_char  ccr3, ccr4;
  322 
  323         eflags = read_eflags();
  324         disable_intr();
  325 
  326         load_cr0(rcr0() | CR0_CD | CR0_NW);
  327         wbinvd();
  328 
  329         /* Initialize CCR0. */
  330         write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);
  331 
  332         /* Initialize CCR1. */
  333 #ifdef CPU_CYRIX_NO_LOCK
  334         write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
  335 #else
  336         write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
  337 #endif
  338 
  339         /* Initialize CCR2. */
  340 #ifdef CPU_SUSP_HLT
  341         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
  342 #else
  343         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
  344 #endif
  345 
  346         ccr3 = read_cyrix_reg(CCR3);
  347         write_cyrix_reg(CCR3, CCR3_MAPEN0);
  348 
  349         /* Initialize CCR4. */
  350         ccr4 = read_cyrix_reg(CCR4);
  351         ccr4 |= CCR4_DTE;
  352         ccr4 &= ~CCR4_IOMASK;
  353 #ifdef CPU_IORT
  354         write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
  355 #else
  356         write_cyrix_reg(CCR4, ccr4 | 7);
  357 #endif
  358 
  359         /* Initialize CCR5. */
  360 #ifdef CPU_WT_ALLOC
  361         write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
  362 #endif
  363 
  364         /* Restore CCR3. */
  365         write_cyrix_reg(CCR3, ccr3);
  366 
  367         /* Unlock NW bit in CR0. */
  368         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
  369 
  370         /*
   371          * Earlier revisions of the 6x86 CPU could crash the system if
   372          * the L1 cache is in write-back mode.
  373          */
  374         if ((cyrix_did & 0xff00) > 0x1600)
  375                 load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  376         else {
  377                 /* Revision 2.6 and lower. */
  378 #ifdef CYRIX_CACHE_REALLY_WORKS
  379                 load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  380 #else
  381                 load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0 and NW = 1 */
  382 #endif
  383         }
  384 
  385         /* Lock NW bit in CR0. */
  386         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);
  387 
  388         write_eflags(eflags);
  389 }
  390 #endif /* I486_CPU */
  391 
  392 #ifdef I686_CPU
  393 /*
  394  * Cyrix 6x86MX (code-named M2)
  395  *
  396  * XXX - What should I do here?  Please let me know.
  397  */
  398 static void
  399 init_6x86MX(void)
  400 {
  401         u_long  eflags;
  402         u_char  ccr3, ccr4;
  403 
  404         eflags = read_eflags();
  405         disable_intr();
  406 
  407         load_cr0(rcr0() | CR0_CD | CR0_NW);
  408         wbinvd();
  409 
  410         /* Initialize CCR0. */
  411         write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);
  412 
  413         /* Initialize CCR1. */
  414 #ifdef CPU_CYRIX_NO_LOCK
  415         write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
  416 #else
  417         write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
  418 #endif
  419 
  420         /* Initialize CCR2. */
  421 #ifdef CPU_SUSP_HLT
  422         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
  423 #else
  424         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
  425 #endif
  426 
  427         ccr3 = read_cyrix_reg(CCR3);
  428         write_cyrix_reg(CCR3, CCR3_MAPEN0);
  429 
  430         /* Initialize CCR4. */
  431         ccr4 = read_cyrix_reg(CCR4);
  432         ccr4 &= ~CCR4_IOMASK;
  433 #ifdef CPU_IORT
  434         write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
  435 #else
  436         write_cyrix_reg(CCR4, ccr4 | 7);
  437 #endif
  438 
  439         /* Initialize CCR5. */
  440 #ifdef CPU_WT_ALLOC
  441         write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
  442 #endif
  443 
  444         /* Restore CCR3. */
  445         write_cyrix_reg(CCR3, ccr3);
  446 
  447         /* Unlock NW bit in CR0. */
  448         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
  449 
  450         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
  451 
  452         /* Lock NW bit in CR0. */
  453         write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);
  454 
  455         write_eflags(eflags);
  456 }
  457 
  458 static void
  459 init_ppro(void)
  460 {
  461 #ifndef SMP
  462         u_int64_t       apicbase;
  463 
  464         /*
   465          * The local APIC should be disabled in a UP (non-SMP) kernel.
  466          */
  467         apicbase = rdmsr(0x1b);
  468         apicbase &= ~0x800LL;
  469         wrmsr(0x1b, apicbase);
  470 #endif
  471 }
  472 
  473 /*
  474  * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
  475  * L2 cache).
  476  */
  477 void
  478 init_mendocino(void)
  479 {
  480 #ifdef CPU_PPRO2CELERON
  481         u_long  eflags;
  482         u_int64_t       bbl_cr_ctl3;
  483 
  484         eflags = read_eflags();
  485         disable_intr();
  486 
  487         load_cr0(rcr0() | CR0_CD | CR0_NW);
  488         wbinvd();
  489 
  490         bbl_cr_ctl3 = rdmsr(0x11e);
  491 
  492         /* If the L2 cache is configured, do nothing. */
  493         if (!(bbl_cr_ctl3 & 1)) {
  494                 bbl_cr_ctl3 = 0x134052bLL;
  495 
  496                 /* Set L2 Cache Latency (Default: 5). */
  497 #ifdef  CPU_CELERON_L2_LATENCY
  498 #if CPU_L2_LATENCY > 15
  499 #error invalid CPU_L2_LATENCY.
  500 #endif
  501                 bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
  502 #else
  503                 bbl_cr_ctl3 |= 5 << 1;
  504 #endif
  505                 wrmsr(0x11e, bbl_cr_ctl3);
  506         }
  507 
  508         load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
  509         write_eflags(eflags);
  510 #endif /* CPU_PPRO2CELERON */
  511 }
  512 
  513 #endif /* I686_CPU */
  514 
  515 /*
  516  * Initialize CR4 (Control register 4) to enable SSE instructions.
  517  */
  518 void
  519 enable_sse(void)
  520 {
  521 #if defined(CPU_ENABLE_SSE)
  522         if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
  523                 load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
  524                 cpu_fxsr = hw_instruction_sse = 1;
  525         }
  526 #endif
  527 }
  528 
  529 void
  530 initializecpu(void)
  531 {
  532 
  533         switch (cpu) {
  534 #ifdef I486_CPU
  535         case CPU_BLUE:
  536                 init_bluelightning();
  537                 break;
  538         case CPU_486DLC:
  539                 init_486dlc();
  540                 break;
  541         case CPU_CY486DX:
  542                 init_cy486dx();
  543                 break;
  544         case CPU_M1SC:
  545                 init_5x86();
  546                 break;
  547 #ifdef CPU_I486_ON_386
  548         case CPU_486:
  549                 init_i486_on_386();
  550                 break;
  551 #endif
  552         case CPU_M1:
  553                 init_6x86();
  554                 break;
  555 #endif /* I486_CPU */
  556 #ifdef I686_CPU
  557         case CPU_M2:
  558                 init_6x86MX();
  559                 break;
  560         case CPU_686:
  561                 if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
  562                         switch (cpu_id & 0xff0) {
  563                         case 0x610:
  564                                 init_ppro();
  565                                 break;
  566                         case 0x660:
  567                                 init_mendocino();
  568                                 break;
  569                         }
  570                 } else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
  571 #if defined(I686_CPU) && defined(CPU_ATHLON_SSE_HACK)
  572                         /*
  573                          * Sometimes the BIOS doesn't enable SSE instructions.
  574                          * According to AMD document 20734, the mobile
  575                          * Duron, the (mobile) Athlon 4 and the Athlon MP
  576                          * support SSE. These correspond to cpu_id 0x66X
  577                          * or 0x67X.
  578                          */
  579                         if ((cpu_feature & CPUID_XMM) == 0 &&
  580                             ((cpu_id & ~0xf) == 0x660 ||
  581                              (cpu_id & ~0xf) == 0x670 ||
  582                              (cpu_id & ~0xf) == 0x680)) {
  583                                 u_int regs[4];
  584                                 wrmsr(0xC0010015, rdmsr(0xC0010015) & ~0x08000);
  585                                 do_cpuid(1, regs);
  586                                 cpu_feature = regs[3];
  587                         }
  588 #endif
  589                 }
  590                 break;
  591 #endif
  592         default:
  593                 break;
  594         }
  595         enable_sse();
  596 
  597 #if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
  598         /*
   599          * The OS must flush the L1 cache itself because no PC-98 machine
   600          * supports non-Intel CPUs.  Use the wbinvd instruction before a
   601          * DMA transfer when need_pre_dma_flush = 1, and the invd
   602          * instruction after a DMA transfer when need_post_dma_flush = 1.
   603          * If your CPU upgrade product supports hardware cache control,
   604          * you can add the CPU_UPGRADE_HW_CACHE option to your kernel
   605          * configuration file to eliminate the unneeded cache flushes.
  606          */
  607         if (strcmp(cpu_vendor, "CyrixInstead") == 0) {
  608                 switch (cpu) {
  609 #ifdef I486_CPU
  610                 case CPU_486DLC:
  611                         need_post_dma_flush = 1;
  612                         break;
  613                 case CPU_M1SC:
  614                         need_pre_dma_flush = 1;
  615                         break;
  616                 case CPU_CY486DX:
  617                         need_pre_dma_flush = 1;
  618 #ifdef CPU_I486_ON_386
  619                         need_post_dma_flush = 1;
  620 #endif
  621                         break;
  622 #endif
  623                 default:
  624                         break;
  625                 }
  626         } else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
  627                 switch (cpu_id & 0xFF0) {
  628                 case 0x470:             /* Enhanced Am486DX2 WB */
  629                 case 0x490:             /* Enhanced Am486DX4 WB */
  630                 case 0x4F0:             /* Am5x86 WB */
  631                         need_pre_dma_flush = 1;
  632                         break;
  633                 }
  634         } else if (strcmp(cpu_vendor, "IBM") == 0) {
  635                 need_post_dma_flush = 1;
  636         } else {
  637 #ifdef CPU_I486_ON_386
  638                 need_pre_dma_flush = 1;
  639 #endif
  640         }
  641 #endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
  642 }
  643 
  644 #if defined(I586_CPU) && defined(CPU_WT_ALLOC)
  645 /*
   646  * Enable the write allocate feature of AMD processors.
   647  * The following functions require the Maxmem variable to be set.
  648  */
  649 void
  650 enable_K5_wt_alloc(void)
  651 {
  652         u_int64_t       msr;
  653 
  654         /*
  655          * Write allocate is supported only on models 1, 2, and 3, with
  656          * a stepping of 4 or greater.
  657          */
  658         if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
  659                 disable_intr();
  660                 msr = rdmsr(0x83);              /* HWCR */
   661                 wrmsr(0x83, msr & ~0x10);
  662 
  663                 /*
  664                  * We have to tell the chip where the top of memory is,
   665          * since video cards could have frame buffers there,
  666                  * memory-mapped I/O could be there, etc.
  667                  */
   668                 if (Maxmem > 0)
   669                         msr = Maxmem / 16;
   670                 else
   671                         msr = 0;
  672                 msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
  673 #ifdef PC98
  674                 if (!(inb(0x43b) & 4)) {
  675                         wrmsr(0x86, 0x0ff00f0);
  676                         msr |= AMD_WT_ALLOC_PRE;
  677                 }
  678 #else
  679                 /*
   680                  * There is no way to know whether the 15-16M hole exists or not.
   681                  * Therefore, we disable write allocate for this range.
   682                  */
   683                 wrmsr(0x86, 0x0ff00f0);
   684                 msr |= AMD_WT_ALLOC_PRE;
  685 #endif
  686                 wrmsr(0x85, msr);
  687 
   688                 msr = rdmsr(0x83);
   689                 wrmsr(0x83, msr | 0x10);        /* enable write allocate */
  690 
  691                 enable_intr();
  692         }
  693 }
  694 
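The model/stepping test in enable_K5_wt_alloc() above reads the usual CPUID signature layout: bits 3:0 are the stepping and bits 7:4 the model (the family, bits 11:8, is already known to be 5 when this code runs). A stand-alone decode sketch with a made-up cpu_id value, for illustration only:

#include <stdio.h>

int
main(void)
{
	unsigned cpu_id = 0x524;	/* hypothetical K5: family 5, model 2, stepping 4 */
	unsigned stepping = cpu_id & 0x0f;
	unsigned model = (cpu_id & 0xf0) >> 4;
	unsigned family = (cpu_id & 0xf00) >> 8;

	/* Same condition as enable_K5_wt_alloc(): model > 0 and stepping > 3. */
	printf("family %u model %u stepping %u -> write allocate %s\n",
	    family, model, stepping,
	    (model > 0 && stepping > 3) ? "enabled" : "skipped");
	return (0);
}
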
  695 void
  696 enable_K6_wt_alloc(void)
  697 {
  698         quad_t  size;
  699         u_int64_t       whcr;
  700         u_long  eflags;
  701 
  702         eflags = read_eflags();
  703         disable_intr();
  704         wbinvd();
  705 
  706 #ifdef CPU_DISABLE_CACHE
  707         /*
   708          * Certain K6-2 boxes become unstable when write allocation is
  709          * enabled.
  710          */
  711         /*
   712          * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
   713          * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
   714          * All other bits in TR12 have no effect on the processor's operation.
  715          * The I/O Trap Restart function (bit 9 of TR12) is always enabled
  716          * on the AMD-K6.
  717          */
  718         wrmsr(0x0000000e, (u_int64_t)0x0008);
  719 #endif
  720         /* Don't assume that memory size is aligned with 4M. */
   721         if (Maxmem > 0)
   722                 size = ((Maxmem >> 8) + 3) >> 2;
   723         else
   724                 size = 0;
  725 
  726         /* Limit is 508M bytes. */
  727         if (size > 0x7f)
  728                 size = 0x7f;
  729         whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);
  730 
  731 #if defined(PC98) || defined(NO_MEMORY_HOLE)
  732         if (whcr & (0x7fLL << 1)) {
  733 #ifdef PC98
  734                 /*
   735                  * If bit 2 of port 0x43b is 0, disable write allocate for the
  736                  * 15-16M range.
  737                  */
  738                 if (!(inb(0x43b) & 4))
  739                         whcr &= ~0x0001LL;
  740                 else
  741 #endif
  742                         whcr |=  0x0001LL;
  743         }
  744 #else
  745         /*
   746          * There is no way to know whether the 15-16M hole exists or not.
  747          * Therefore, we disable write allocate for this range.
  748          */
  749         whcr &= ~0x0001LL;
  750 #endif
  751         wrmsr(0x0c0000082, whcr);
  752 
  753         write_eflags(eflags);
  754         enable_intr();
  755 }
  756 
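The size computation in enable_K6_wt_alloc() above (repeated in enable_K6_2_wt_alloc() below) converts Maxmem, a page count on i386 (4 KB pages, so a right shift by 8 yields 1 MB units), into 4 MB units rounded upward: adding 3 before shifting right by 2 rounds up to the next 4 MB. A small stand-alone sketch with an example value chosen by me for illustration:

#include <stdio.h>

int
main(void)
{
	long maxmem = 0x7f00;	/* hypothetical Maxmem: 0x7f00 4 KB pages = 127 MB */
	long size;

	size = ((maxmem >> 8) + 3) >> 2;	/* same arithmetic as the kernel code */
	printf("%ld MB rounds up to %ld x 4 MB\n", maxmem >> 8, size);
	/* Prints: 127 MB rounds up to 32 x 4 MB */
	return (0);
}
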
  757 void
  758 enable_K6_2_wt_alloc(void)
  759 {
  760         quad_t  size;
  761         u_int64_t       whcr;
  762         u_long  eflags;
  763 
  764         eflags = read_eflags();
  765         disable_intr();
  766         wbinvd();
  767 
  768 #ifdef CPU_DISABLE_CACHE
  769         /*
   770          * Certain K6-2 boxes become unstable when write allocation is
  771          * enabled.
  772          */
  773         /*
   774          * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
   775          * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
   776          * All other bits in TR12 have no effect on the processor's operation.
  777          * The I/O Trap Restart function (bit 9 of TR12) is always enabled
  778          * on the AMD-K6.
  779          */
  780         wrmsr(0x0000000e, (u_int64_t)0x0008);
  781 #endif
  782         /* Don't assume that memory size is aligned with 4M. */
   783         if (Maxmem > 0)
   784                 size = ((Maxmem >> 8) + 3) >> 2;
   785         else
   786                 size = 0;
  787 
  788         /* Limit is 4092M bytes. */
   789         if (size > 0x3ff)
  790                 size = 0x3ff;
  791         whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);
  792 
  793 #if defined(PC98) || defined(NO_MEMORY_HOLE)
  794         if (whcr & (0x3ffLL << 22)) {
  795 #ifdef PC98
  796                 /*
   797                  * If bit 2 of port 0x43b is 0, disable write allocate for the
  798                  * 15-16M range.
  799                  */
  800                 if (!(inb(0x43b) & 4))
  801                         whcr &= ~(1LL << 16);
  802                 else
  803 #endif
  804                         whcr |=  1LL << 16;
  805         }
  806 #else
  807         /*
   808          * There is no way to know whether the 15-16M hole exists or not.
  809          * Therefore, we disable write allocate for this range.
  810          */
  811         whcr &= ~(1LL << 16);
  812 #endif
  813         wrmsr(0x0c0000082, whcr);
  814 
  815         write_eflags(eflags);
  816         enable_intr();
  817 }
   818 #endif /* I586_CPU && CPU_WT_ALLOC */
  819 
  820 #include "opt_ddb.h"
  821 #ifdef DDB
  822 #include <ddb/ddb.h>
  823 
  824 DB_SHOW_COMMAND(cyrixreg, cyrixreg)
  825 {
  826         u_long  eflags;
  827         u_int   cr0;
  828         u_char  ccr1, ccr2, ccr3;
  829         u_char  ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;
  830 
  831         cr0 = rcr0();
  832         if (strcmp(cpu_vendor,"CyrixInstead") == 0) {
  833                 eflags = read_eflags();
  834                 disable_intr();
  835 
  836 
  837                 if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
  838                         ccr0 = read_cyrix_reg(CCR0);
  839                 }
  840                 ccr1 = read_cyrix_reg(CCR1);
  841                 ccr2 = read_cyrix_reg(CCR2);
  842                 ccr3 = read_cyrix_reg(CCR3);
  843                 if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
  844                         write_cyrix_reg(CCR3, CCR3_MAPEN0);
  845                         ccr4 = read_cyrix_reg(CCR4);
  846                         if ((cpu == CPU_M1) || (cpu == CPU_M2))
  847                                 ccr5 = read_cyrix_reg(CCR5);
  848                         else
  849                                 pcr0 = read_cyrix_reg(PCR0);
  850                         write_cyrix_reg(CCR3, ccr3);            /* Restore CCR3. */
  851                 }
  852                 write_eflags(eflags);
  853 
  854                 if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
  855                         printf("CCR0=%x, ", (u_int)ccr0);
  856 
  857                 printf("CCR1=%x, CCR2=%x, CCR3=%x",
  858                         (u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
  859                 if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
  860                         printf(", CCR4=%x, ", (u_int)ccr4);
  861                         if (cpu == CPU_M1SC)
  862                                 printf("PCR0=%x\n", pcr0);
  863                         else
  864                                 printf("CCR5=%x\n", ccr5);
  865                 }
  866         }
  867         printf("CR0=%x\n", cr0);
  868 }
  869 #endif /* DDB */
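DB_SHOW_COMMAND(cyrixreg, cyrixreg) registers the routine above as a `show` subcommand of the in-kernel debugger, so on a kernel built with the DDB option it can be invoked from the debugger prompt. The fields printed follow the printf() calls above; the values depend on the CPU in the machine:

db> show cyrixreg
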
