/*-
 * Copyright (c) KATO Takenori, 1997, 1998.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.4/sys/i386/i386/initcpu.c 219486 2011-03-11 14:53:34Z avg $");

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

void initializecpu(void);
#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
void enable_K5_wt_alloc(void);
void enable_K6_wt_alloc(void);
void enable_K6_2_wt_alloc(void);
#endif

#ifdef I486_CPU
static void init_5x86(void);
static void init_bluelightning(void);
static void init_486dlc(void);
static void init_cy486dx(void);
#ifdef CPU_I486_ON_386
static void init_i486_on_386(void);
#endif
static void init_6x86(void);
#endif /* I486_CPU */

#ifdef I686_CPU
static void init_6x86MX(void);
static void init_ppro(void);
static void init_mendocino(void);
#endif

static int hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
/*
 * -1: automatic (default)
 *  0: keep CLFLUSH enabled
 *  1: force CLFLUSH to be disabled
 */
static int hw_clflush_disable = -1;
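/*
 * Example (assuming the standard loader tunable mechanism): setting
 * hw.clflush_disable=1 in loader.conf forces CLFLUSH off, while
 * hw.clflush_disable=0 keeps it enabled even when running under a
 * hypervisor.
 */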

/* Must *NOT* be BSS or locore will bzero these after setting them */
int cpu = 0;                    /* Are we 386, 386sx, 486, etc? */
u_int cpu_feature = 0;          /* Feature flags */
u_int cpu_feature2 = 0;         /* Feature flags */
u_int amd_feature = 0;          /* AMD feature flags */
u_int amd_feature2 = 0;         /* AMD feature flags */
u_int amd_pminfo = 0;           /* AMD advanced power management info */
u_int via_feature_rng = 0;      /* VIA RNG features */
u_int via_feature_xcrypt = 0;   /* VIA ACE features */
u_int cpu_high = 0;             /* Highest arg to CPUID */
u_int cpu_id = 0;               /* Stepping ID */
u_int cpu_procinfo = 0;         /* HyperThreading Info / Brand Index / CLFLUSH */
u_int cpu_procinfo2 = 0;        /* Multicore info */
char cpu_vendor[20] = "";       /* CPU Origin code */
u_int cpu_vendor_id = 0;        /* CPU vendor ID */
u_int cpu_clflush_line_size = 32;

SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
    &via_feature_rng, 0, "VIA C3/C7 RNG feature available in CPU");
SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD,
    &via_feature_xcrypt, 0, "VIA C3/C7 xcrypt feature available in CPU");

#ifdef CPU_ENABLE_SSE
u_int cpu_fxsr;                 /* SSE enabled */
u_int cpu_mxcsr_mask;           /* valid bits in mxcsr */
#endif

#ifdef I486_CPU
/*
 * IBM Blue Lightning
 */
static void
init_bluelightning(void)
{
        register_t saveintr;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
        need_post_dma_flush = 1;
#endif

        saveintr = intr_disable();

        load_cr0(rcr0() | CR0_CD | CR0_NW);
        invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
        wrmsr(0x1000, 0x9c92LL);        /* FP operand can be cacheable on Cyrix FPU */
#else
        wrmsr(0x1000, 0x1c92LL);        /* Intel FPU */
#endif
        /* Enables 13MB and 0-640KB cache. */
        wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
        wrmsr(0x1002, 0x04000000LL);    /* Enables triple-clock mode. */
#else
        wrmsr(0x1002, 0x03000000LL);    /* Enables double-clock mode. */
#endif

        /* Enable caching in CR0. */
        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
        invd();
        intr_restore(saveintr);
}

/*
 * Cyrix 486SLC/DLC/SR/DR series
 */
static void
init_486dlc(void)
{
        register_t saveintr;
        u_char ccr0;

        saveintr = intr_disable();
        invd();

        ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
        ccr0 |= CCR0_NC1 | CCR0_BARB;
        write_cyrix_reg(CCR0, ccr0);
        invd();
#else
        ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
        ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
        ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
        ccr0 |= CCR0_CO;                /* Direct mapped mode. */
#endif
        write_cyrix_reg(CCR0, ccr0);

        /* Clear non-cacheable region. */
        write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
        write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
        write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
        write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

        write_cyrix_reg(0, 0);          /* dummy write */

        /* Enable caching in CR0. */
        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
        invd();
#endif /* !CYRIX_CACHE_WORKS */
        intr_restore(saveintr);
}

/*
 * Cyrix 486S/DX series
 */
static void
init_cy486dx(void)
{
        register_t saveintr;
        u_char ccr2;

        saveintr = intr_disable();
        invd();

        ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
        ccr2 |= CCR2_SUSP_HLT;
#endif

#ifdef PC98
        /* Enables WB cache interface pin and Lock NW bit in CR0. */
        ccr2 |= CCR2_WB | CCR2_LOCK_NW;
        /* Unlock NW bit in CR0. */
        write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
        load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0, NW = 1 */
#endif

        write_cyrix_reg(CCR2, ccr2);
        intr_restore(saveintr);
}

/*
 * Cyrix 5x86
 */
static void
init_5x86(void)
{
        register_t saveintr;
        u_char ccr2, ccr3, ccr4, pcr0;

        saveintr = intr_disable();

        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();

        (void)read_cyrix_reg(CCR3);     /* dummy */

        /* Initialize CCR2. */
        ccr2 = read_cyrix_reg(CCR2);
        ccr2 |= CCR2_WB;
#ifdef CPU_SUSP_HLT
        ccr2 |= CCR2_SUSP_HLT;
#else
        ccr2 &= ~CCR2_SUSP_HLT;
#endif
        ccr2 |= CCR2_WT1;
        write_cyrix_reg(CCR2, ccr2);

        /* Initialize CCR4. */
        ccr3 = read_cyrix_reg(CCR3);
        write_cyrix_reg(CCR3, CCR3_MAPEN0);

        ccr4 = read_cyrix_reg(CCR4);
        ccr4 |= CCR4_DTE;
        ccr4 |= CCR4_MEM;
#ifdef CPU_FASTER_5X86_FPU
        ccr4 |= CCR4_FASTFPE;
#else
        ccr4 &= ~CCR4_FASTFPE;
#endif
        ccr4 &= ~CCR4_IOMASK;
        /********************************************************************
         * WARNING: The "BIOS Writers Guide" mentions that the I/O recovery
         * time should be set to 0 as an errata workaround.
         ********************************************************************/
#ifdef CPU_IORT
        ccr4 |= CPU_IORT & CCR4_IOMASK;
#endif
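        /*
         * CPU_IORT comes from the kernel configuration (e.g. "options
         * CPU_IORT=0" would follow the recommendation above); only the
         * bits covered by CCR4_IOMASK are used.
         */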
        write_cyrix_reg(CCR4, ccr4);

        /* Initialize PCR0. */
        /****************************************************************
         * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
         * BTB_EN might make your system unstable.
         ****************************************************************/
        pcr0 = read_cyrix_reg(PCR0);
#ifdef CPU_RSTK_EN
        pcr0 |= PCR0_RSTK;
#else
        pcr0 &= ~PCR0_RSTK;
#endif
#ifdef CPU_BTB_EN
        pcr0 |= PCR0_BTB;
#else
        pcr0 &= ~PCR0_BTB;
#endif
#ifdef CPU_LOOP_EN
        pcr0 |= PCR0_LOOP;
#else
        pcr0 &= ~PCR0_LOOP;
#endif

        /****************************************************************
         * WARNING: if you use a memory-mapped I/O device, don't use the
         * CPU_DISABLE_5X86_LSSER option, which may reorder memory-mapped
         * I/O accesses.
         * IF YOUR MOTHERBOARD HAS A PCI BUS, DON'T DISABLE LSSER.
         ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
        pcr0 &= ~PCR0_LSSER;
#else
        pcr0 |= PCR0_LSSER;
#endif
        write_cyrix_reg(PCR0, pcr0);

        /* Restore CCR3. */
        write_cyrix_reg(CCR3, ccr3);

        (void)read_cyrix_reg(0x80);     /* dummy */

        /* Unlock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
        load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0, NW = 1 */
        /* Lock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

        intr_restore(saveintr);
}

#ifdef CPU_I486_ON_386
/*
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
 */
static void
init_i486_on_386(void)
{
        register_t saveintr;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
        need_post_dma_flush = 1;
#endif

        saveintr = intr_disable();

        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0, NW = 0 */

        intr_restore(saveintr);
}
#endif

/*
 * Cyrix 6x86
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86(void)
{
        register_t saveintr;
        u_char ccr3, ccr4;

        saveintr = intr_disable();

        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();

        /* Initialize CCR0. */
        write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

        /* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
        write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
        write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

        /* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

        ccr3 = read_cyrix_reg(CCR3);
        write_cyrix_reg(CCR3, CCR3_MAPEN0);

        /* Initialize CCR4. */
        ccr4 = read_cyrix_reg(CCR4);
        ccr4 |= CCR4_DTE;
        ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
        write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
        write_cyrix_reg(CCR4, ccr4 | 7);
#endif

        /* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
        write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

        /* Restore CCR3. */
        write_cyrix_reg(CCR3, ccr3);

        /* Unlock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

        /*
         * Earlier revisions of the 6x86 CPU could crash the system if
         * the L1 cache is in write-back mode.
         */
        if ((cyrix_did & 0xff00) > 0x1600)
                load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
        else {
                /* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
                load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
#else
                load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0 and NW = 1 */
#endif
        }

        /* Lock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

        intr_restore(saveintr);
}
#endif /* I486_CPU */

#ifdef I686_CPU
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86MX(void)
{
        register_t saveintr;
        u_char ccr3, ccr4;

        saveintr = intr_disable();

        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();

        /* Initialize CCR0. */
        write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

        /* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
        write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
        write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

        /* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

        ccr3 = read_cyrix_reg(CCR3);
        write_cyrix_reg(CCR3, CCR3_MAPEN0);

        /* Initialize CCR4. */
        ccr4 = read_cyrix_reg(CCR4);
        ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
        write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
        write_cyrix_reg(CCR4, ccr4 | 7);
#endif

        /* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
        write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

        /* Restore CCR3. */
        write_cyrix_reg(CCR3, ccr3);

        /* Unlock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */

        /* Lock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

        intr_restore(saveintr);
}

static void
init_ppro(void)
{
        u_int64_t apicbase;

        /*
         * Local APIC should be disabled if it is not going to be used.
         */
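        /*
         * APICBASE_ENABLED is bit 11 of the IA32_APIC_BASE MSR (the APIC
         * global enable flag); on the P6 family, once this bit is cleared
         * the local APIC remains disabled until the processor is reset.
         */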
        apicbase = rdmsr(MSR_APICBASE);
        apicbase &= ~APICBASE_ENABLED;
        wrmsr(MSR_APICBASE, apicbase);
}

/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
        register_t saveintr;
        u_int64_t bbl_cr_ctl3;

        saveintr = intr_disable();

        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();

        bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);

        /* If the L2 cache is configured, do nothing. */
        if (!(bbl_cr_ctl3 & 1)) {
                bbl_cr_ctl3 = 0x134052bLL;

                /* Set L2 Cache Latency (Default: 5). */
#ifdef CPU_CELERON_L2_LATENCY
#if CPU_L2_LATENCY > 15
#error invalid CPU_L2_LATENCY.
#endif
                bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
#else
                bbl_cr_ctl3 |= 5 << 1;
#endif
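                /*
                 * As used here, bit 0 of BBL_CR_CTL3 marks the L2 cache as
                 * already configured and bits 4-1 hold the latency field;
                 * the meaning of the remaining bits in the 0x134052b value
                 * is not documented in this file.
                 */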
                wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
        }

        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
        intr_restore(saveintr);
#endif /* CPU_PPRO2CELERON */
}

/*
 * Initialize special VIA C3/C7 features
 */
static void
init_via(void)
{
        u_int regs[4], val;
        u_int64_t msreg;

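        /*
         * CPUID leaves 0xc0000000 and up are VIA/Centaur-specific:
         * leaf 0xc0000000 returns the highest supported Centaur leaf in
         * %eax, and leaf 0xc0000001 reports the PadLock feature flags
         * in %edx.
         */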
        do_cpuid(0xc0000000, regs);
        val = regs[0];
        if (val >= 0xc0000001) {
                do_cpuid(0xc0000001, regs);
                val = regs[3];
        } else
                val = 0;

        /* Enable RNG if present and disabled */
        if (val & VIA_CPUID_HAS_RNG) {
                if (!(val & VIA_CPUID_DO_RNG)) {
                        msreg = rdmsr(0x110B);
                        msreg |= 0x40;
                        wrmsr(0x110B, msreg);
                }
                via_feature_rng = VIA_HAS_RNG;
        }
        /* Enable AES engine if present and disabled */
        if (val & VIA_CPUID_HAS_ACE) {
                if (!(val & VIA_CPUID_DO_ACE)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_AES;
        }
        /* Enable ACE2 engine if present and disabled */
        if (val & VIA_CPUID_HAS_ACE2) {
                if (!(val & VIA_CPUID_DO_ACE2)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_AESCTR;
        }
        /* Enable SHA engine if present and disabled */
        if (val & VIA_CPUID_HAS_PHE) {
                if (!(val & VIA_CPUID_DO_PHE)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_SHA;
        }
        /* Enable MM engine if present and disabled */
        if (val & VIA_CPUID_HAS_PMM) {
                if (!(val & VIA_CPUID_DO_PMM)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_MM;
        }
}

#endif /* I686_CPU */

/*
 * Initialize CR4 (Control register 4) to enable SSE instructions.
 */
void
enable_sse(void)
{
#if defined(CPU_ENABLE_SSE)
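        /*
         * CR4_FXSR and CR4_XMM correspond to CR4.OSFXSR (bit 9) and
         * CR4.OSXMMEXCPT (bit 10); both must be set before SSE instructions
         * and unmasked SIMD floating-point exceptions may be used.
         */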
        if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
                load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
                cpu_fxsr = hw_instruction_sse = 1;
        }
#endif
}

void
initializecpu(void)
{

        switch (cpu) {
#ifdef I486_CPU
        case CPU_BLUE:
                init_bluelightning();
                break;
        case CPU_486DLC:
                init_486dlc();
                break;
        case CPU_CY486DX:
                init_cy486dx();
                break;
        case CPU_M1SC:
                init_5x86();
                break;
#ifdef CPU_I486_ON_386
        case CPU_486:
                init_i486_on_386();
                break;
#endif
        case CPU_M1:
                init_6x86();
                break;
#endif /* I486_CPU */
#ifdef I686_CPU
        case CPU_M2:
                init_6x86MX();
                break;
        case CPU_686:
                if (cpu_vendor_id == CPU_VENDOR_INTEL) {
                        switch (cpu_id & 0xff0) {
                        case 0x610:
                                init_ppro();
                                break;
                        case 0x660:
                                init_mendocino();
                                break;
                        }
                } else if (cpu_vendor_id == CPU_VENDOR_AMD) {
#if defined(I686_CPU) && defined(CPU_ATHLON_SSE_HACK)
                        /*
                         * Sometimes the BIOS doesn't enable SSE instructions.
                         * According to AMD document 20734, the mobile
                         * Duron, the (mobile) Athlon 4 and the Athlon MP
                         * support SSE.  These correspond to cpu_id 0x66X
                         * or 0x67X.
                         */
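                        /*
                         * The check below also accepts 0x68X parts.  Bit 15
                         * of MSR_HWCR (the 0x08000 mask) is reported to be
                         * the SSE-disable bit on these CPUs; clearing it and
                         * re-reading CPUID leaf 1 exposes the SSE feature
                         * flag.
                         */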
                        if ((cpu_feature & CPUID_XMM) == 0 &&
                            ((cpu_id & ~0xf) == 0x660 ||
                             (cpu_id & ~0xf) == 0x670 ||
                             (cpu_id & ~0xf) == 0x680)) {
                                u_int regs[4];
                                wrmsr(MSR_HWCR, rdmsr(MSR_HWCR) & ~0x08000);
                                do_cpuid(1, regs);
                                cpu_feature = regs[3];
                        }
#endif
                } else if (cpu_vendor_id == CPU_VENDOR_CENTAUR) {
                        switch (cpu_id & 0xff0) {
                        case 0x690:
                                if ((cpu_id & 0xf) < 3)
                                        break;
                                /* fall through. */
                        case 0x6a0:
                        case 0x6d0:
                        case 0x6f0:
                                init_via();
                                break;
                        default:
                                break;
                        }
                }
#ifdef PAE
                if ((amd_feature & AMDID_NX) != 0) {
                        uint64_t msr;

                        msr = rdmsr(MSR_EFER) | EFER_NXE;
                        wrmsr(MSR_EFER, msr);
                        pg_nx = PG_NX;
                }
#endif
                break;
#endif
        default:
                break;
        }
        enable_sse();

        /*
         * CPUID with %eax = 1, %ebx returns
         * Bits 15-8: CLFLUSH line size
         *  (Value * 8 = cache line size in bytes)
         */
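        /*
         * For example, a reported field value of 8 gives a 64-byte cache
         * line, the common case on modern x86 processors.
         */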
        if ((cpu_feature & CPUID_CLFSH) != 0)
                cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
        /*
         * XXXKIB: (temporary) hack to work around traps generated
         * when CLFLUSHing APIC register window under virtualization
         * environments.  These environments tend to disable the
         * CPUID_SS feature even though the native CPU supports it.
         */
        TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
        if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1)
                cpu_feature &= ~CPUID_CLFSH;
        /*
         * Allow the CLFLUSH feature to be forcibly disabled by means of
         * the hw.clflush_disable tunable.
         */
        if (hw_clflush_disable == 1)
                cpu_feature &= ~CPUID_CLFSH;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
        /*
         * The OS must flush the L1 cache by itself, because no PC-98
         * machine is designed for non-Intel CPUs.  Use the wbinvd
         * instruction before a DMA transfer when need_pre_dma_flush = 1,
         * and the invd instruction after a DMA transfer when
         * need_post_dma_flush = 1.  If your CPU upgrade product supports
         * hardware cache control, you can add the CPU_UPGRADE_HW_CACHE
         * option to your kernel configuration file; this option eliminates
         * the unneeded cache flush instruction(s).
         */
        if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
                switch (cpu) {
#ifdef I486_CPU
                case CPU_486DLC:
                        need_post_dma_flush = 1;
                        break;
                case CPU_M1SC:
                        need_pre_dma_flush = 1;
                        break;
                case CPU_CY486DX:
                        need_pre_dma_flush = 1;
#ifdef CPU_I486_ON_386
                        need_post_dma_flush = 1;
#endif
                        break;
#endif
                default:
                        break;
                }
        } else if (cpu_vendor_id == CPU_VENDOR_AMD) {
                switch (cpu_id & 0xFF0) {
                case 0x470:     /* Enhanced Am486DX2 WB */
                case 0x490:     /* Enhanced Am486DX4 WB */
                case 0x4F0:     /* Am5x86 WB */
                        need_pre_dma_flush = 1;
                        break;
                }
        } else if (cpu_vendor_id == CPU_VENDOR_IBM) {
                need_post_dma_flush = 1;
        } else {
#ifdef CPU_I486_ON_386
                need_pre_dma_flush = 1;
#endif
        }
#endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
}

#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
/*
 * Enable the write-allocate feature of AMD processors.
 * The following functions require the Maxmem variable to be set.
 */
void
enable_K5_wt_alloc(void)
{
        u_int64_t msr;
        register_t saveintr;

        /*
         * Write allocate is supported only on models 1, 2, and 3, with
         * a stepping of 4 or greater.
         */
        if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
                saveintr = intr_disable();
                msr = rdmsr(0x83);              /* HWCR */
                /* Turn write allocate off while it is being set up. */
                wrmsr(0x83, msr & ~0x10);

                /*
                 * We have to tell the chip where the top of memory is,
                 * since video cards could have frame buffers there,
                 * memory-mapped I/O could be there, etc.
                 */
                if (Maxmem > 0)
                        msr = Maxmem / 16;
                else
                        msr = 0;
                msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
#ifdef PC98
                if (!(inb(0x43b) & 4)) {
                        wrmsr(0x86, 0x0ff00f0);
                        msr |= AMD_WT_ALLOC_PRE;
                }
#else
                /*
                 * There is no way to know whether the 15-16M hole exists
                 * or not, so we disable write allocate for this range.
                 */
                wrmsr(0x86, 0x0ff00f0);
                msr |= AMD_WT_ALLOC_PRE;
#endif
                wrmsr(0x85, msr);

                msr = rdmsr(0x83);
                wrmsr(0x83, msr | 0x10);        /* enable write allocate */
                intr_restore(saveintr);
        }
}

void
enable_K6_wt_alloc(void)
{
        quad_t size;
        u_int64_t whcr;
        register_t saveintr;

        saveintr = intr_disable();
        wbinvd();

#ifdef CPU_DISABLE_CACHE
        /*
         * Certain K6-2 boxes become unstable when write allocation is
         * enabled.
         */
        /*
         * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
         * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
         * All other bits in TR12 have no effect on the processor's
         * operation.  The I/O Trap Restart function (bit 9 of TR12) is
         * always enabled on the AMD-K6.
         */
        wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
        /* Don't assume that the memory size is a multiple of 4M. */
        if (Maxmem > 0)
                size = ((Maxmem >> 8) + 3) >> 2;
        else
                size = 0;
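        /*
         * The computation above assumes Maxmem is in 4 KB pages: shifting
         * right by 8 converts it to megabytes, and the "+ 3, >> 2" rounds
         * up to the 4 MB units used by the WHCR memory-size field
         * programmed below.
         */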

        /* Limit is 508M bytes. */
        if (size > 0x7f)
                size = 0x7f;
        whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);

#if defined(PC98) || defined(NO_MEMORY_HOLE)
        if (whcr & (0x7fLL << 1)) {
#ifdef PC98
                /*
                 * If bit 2 of port 0x43b is 0, disable write allocate for
                 * the 15-16M range.
                 */
                if (!(inb(0x43b) & 4))
                        whcr &= ~0x0001LL;
                else
#endif
                        whcr |= 0x0001LL;
        }
#else
        /*
         * There is no way to know whether the 15-16M hole exists or not,
         * so we disable write allocate for this range.
         */
        whcr &= ~0x0001LL;
#endif
        wrmsr(0xc0000082, whcr);

        intr_restore(saveintr);
}

void
enable_K6_2_wt_alloc(void)
{
        quad_t size;
        u_int64_t whcr;
        register_t saveintr;

        saveintr = intr_disable();
        wbinvd();

#ifdef CPU_DISABLE_CACHE
        /*
         * Certain K6-2 boxes become unstable when write allocation is
         * enabled.
         */
        /*
         * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
         * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
         * All other bits in TR12 have no effect on the processor's
         * operation.  The I/O Trap Restart function (bit 9 of TR12) is
         * always enabled on the AMD-K6.
         */
        wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
        /* Don't assume that the memory size is a multiple of 4M. */
        if (Maxmem > 0)
                size = ((Maxmem >> 8) + 3) >> 2;
        else
                size = 0;
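        /*
         * Same arithmetic as in enable_K6_wt_alloc(): Maxmem (in 4 KB
         * pages) is converted to 4 MB units; on these parts the WHCR
         * size field occupies bits 31-22, hence the shift by 22 below.
         */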

        /* Limit is 4092M bytes. */
        if (size > 0x3ff)
                size = 0x3ff;
        whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);

#if defined(PC98) || defined(NO_MEMORY_HOLE)
        if (whcr & (0x3ffLL << 22)) {
#ifdef PC98
                /*
                 * If bit 2 of port 0x43b is 0, disable write allocate for
                 * the 15-16M range.
                 */
                if (!(inb(0x43b) & 4))
                        whcr &= ~(1LL << 16);
                else
#endif
                        whcr |= 1LL << 16;
        }
#else
        /*
         * There is no way to know whether the 15-16M hole exists or not,
         * so we disable write allocate for this range.
         */
        whcr &= ~(1LL << 16);
#endif
        wrmsr(0xc0000082, whcr);

        intr_restore(saveintr);
}
#endif /* I586_CPU && CPU_WT_ALLOC */

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(cyrixreg, cyrixreg)
{
        register_t saveintr;
        u_int cr0;
        u_char ccr1, ccr2, ccr3;
        u_char ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;

        cr0 = rcr0();
        if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
                saveintr = intr_disable();

                if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
                        ccr0 = read_cyrix_reg(CCR0);
                }
                ccr1 = read_cyrix_reg(CCR1);
                ccr2 = read_cyrix_reg(CCR2);
                ccr3 = read_cyrix_reg(CCR3);
                if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
                        write_cyrix_reg(CCR3, CCR3_MAPEN0);
                        ccr4 = read_cyrix_reg(CCR4);
                        if ((cpu == CPU_M1) || (cpu == CPU_M2))
                                ccr5 = read_cyrix_reg(CCR5);
                        else
                                pcr0 = read_cyrix_reg(PCR0);
                        write_cyrix_reg(CCR3, ccr3);    /* Restore CCR3. */
                }
                intr_restore(saveintr);

                if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
                        printf("CCR0=%x, ", (u_int)ccr0);

                printf("CCR1=%x, CCR2=%x, CCR3=%x",
                    (u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
                if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
                        printf(", CCR4=%x, ", (u_int)ccr4);
                        if (cpu == CPU_M1SC)
                                printf("PCR0=%x\n", pcr0);
                        else
                                printf("CCR5=%x\n", ccr5);
                }
        }
        printf("CR0=%x\n", cr0);
}
#endif /* DDB */