/*-
 * Copyright (c) KATO Takenori, 1997, 1998.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

void initializecpu(void);
#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
void enable_K5_wt_alloc(void);
void enable_K6_wt_alloc(void);
void enable_K6_2_wt_alloc(void);
#endif

#ifdef I486_CPU
static void init_5x86(void);
static void init_bluelightning(void);
static void init_486dlc(void);
static void init_cy486dx(void);
#ifdef CPU_I486_ON_386
static void init_i486_on_386(void);
#endif
static void init_6x86(void);
#endif /* I486_CPU */

#ifdef I686_CPU
static void init_6x86MX(void);
static void init_ppro(void);
static void init_mendocino(void);
#endif

static int hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
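/*
 * Userland can read this flag as the hw.instruction_sse sysctl, e.g.
 * (a hypothetical userland sketch):
 *
 *      int v; size_t len = sizeof(v);
 *      sysctlbyname("hw.instruction_sse", &v, &len, NULL, 0);
 */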

/* Must *NOT* be in BSS or locore will bzero these after setting them. */
int cpu = 0;                    /* Are we 386, 386sx, 486, etc? */
u_int cpu_feature = 0;          /* Feature flags */
u_int cpu_feature2 = 0;         /* Feature flags */
u_int amd_feature = 0;          /* AMD feature flags */
u_int amd_feature2 = 0;         /* AMD feature flags */
u_int via_feature_rng = 0;      /* VIA RNG features */
u_int via_feature_xcrypt = 0;   /* VIA ACE features */
u_int cpu_high = 0;             /* Highest arg to CPUID */
u_int cpu_id = 0;               /* Stepping ID */
u_int cpu_procinfo = 0;         /* HyperThreading Info / Brand Index / CLFLUSH */
u_int cpu_procinfo2 = 0;        /* Multicore info */
char cpu_vendor[20] = "";       /* CPU Origin code */

SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
    &via_feature_rng, 0, "VIA C3/C7 RNG feature available in CPU");
SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD,
    &via_feature_xcrypt, 0, "VIA C3/C7 xcrypt feature available in CPU");

#ifdef CPU_ENABLE_SSE
u_int cpu_fxsr;                 /* SSE enabled */
u_int cpu_mxcsr_mask;           /* Valid bits in mxcsr */
#endif

#ifdef I486_CPU
/*
 * IBM Blue Lightning
 */
static void
init_bluelightning(void)
{
        u_long eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
        need_post_dma_flush = 1;
#endif

        eflags = read_eflags();
        disable_intr();

        load_cr0(rcr0() | CR0_CD | CR0_NW);
        invd();

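        /*
         * The MSRs written below (0x1000-0x1002) are IBM Blue Lightning
         * model-specific registers: 0x1000 controls FPU operand
         * cacheability, 0x1001 the cache region enables, and 0x1002 the
         * clock-multiplier mode, per the comments on each write.
         */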
#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
        wrmsr(0x1000, 0x9c92LL);        /* FP operand can be cacheable on Cyrix FPU */
#else
        wrmsr(0x1000, 0x1c92LL);        /* Intel FPU */
#endif
        /* Enables 13MB and 0-640KB cache. */
        wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
        wrmsr(0x1002, 0x04000000LL);    /* Enables triple-clock mode. */
#else
        wrmsr(0x1002, 0x03000000LL);    /* Enables double-clock mode. */
#endif

        /* Enable caching in CR0. */
        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
        invd();
        write_eflags(eflags);
}

/*
 * Cyrix 486SLC/DLC/SR/DR series
 */
static void
init_486dlc(void)
{
        u_long eflags;
        u_char ccr0;

        eflags = read_eflags();
        disable_intr();
        invd();

        ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
        ccr0 |= CCR0_NC1 | CCR0_BARB;
        write_cyrix_reg(CCR0, ccr0);
        invd();
#else
        ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
        ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
        ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
        ccr0 |= CCR0_CO;                /* Direct mapped mode. */
#endif
        write_cyrix_reg(CCR0, ccr0);

        /* Clear non-cacheable region. */
        write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
        write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
        write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
        write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

        write_cyrix_reg(0, 0);          /* dummy write */

        /* Enable caching in CR0. */
        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
        invd();
#endif /* !CYRIX_CACHE_WORKS */
        write_eflags(eflags);
}

/*
 * Cyrix 486S/DX series
 */
static void
init_cy486dx(void)
{
        u_long eflags;
        u_char ccr2;

        eflags = read_eflags();
        disable_intr();
        invd();

        ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
        ccr2 |= CCR2_SUSP_HLT;
#endif

#ifdef PC98
        /* Enable the WB cache interface pin and lock the NW bit in CR0. */
        ccr2 |= CCR2_WB | CCR2_LOCK_NW;
        /* Unlock NW bit in CR0. */
        write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
        load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0, NW = 1 */
#endif

        write_cyrix_reg(CCR2, ccr2);
        write_eflags(eflags);
}

/*
 * Cyrix 5x86
 */
static void
init_5x86(void)
{
        u_long eflags;
        u_char ccr2, ccr3, ccr4, pcr0;

        eflags = read_eflags();
        disable_intr();

        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();

        (void)read_cyrix_reg(CCR3);     /* dummy */

        /* Initialize CCR2. */
        ccr2 = read_cyrix_reg(CCR2);
        ccr2 |= CCR2_WB;
#ifdef CPU_SUSP_HLT
        ccr2 |= CCR2_SUSP_HLT;
#else
        ccr2 &= ~CCR2_SUSP_HLT;
#endif
        ccr2 |= CCR2_WT1;
        write_cyrix_reg(CCR2, ccr2);

        /* Initialize CCR4. */
        ccr3 = read_cyrix_reg(CCR3);
        write_cyrix_reg(CCR3, CCR3_MAPEN0);

        ccr4 = read_cyrix_reg(CCR4);
        ccr4 |= CCR4_DTE;
        ccr4 |= CCR4_MEM;
#ifdef CPU_FASTER_5X86_FPU
        ccr4 |= CCR4_FASTFPE;
#else
        ccr4 &= ~CCR4_FASTFPE;
#endif
        ccr4 &= ~CCR4_IOMASK;
        /********************************************************************
         * WARNING: The "BIOS Writers Guide" mentions that the I/O recovery
         * time should be 0 as an errata workaround.
         ********************************************************************/
#ifdef CPU_IORT
        ccr4 |= CPU_IORT & CCR4_IOMASK;
#endif
        write_cyrix_reg(CCR4, ccr4);

        /* Initialize PCR0. */
        /****************************************************************
         * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
         * BTB_EN might make your system unstable.
         ****************************************************************/
        pcr0 = read_cyrix_reg(PCR0);
#ifdef CPU_RSTK_EN
        pcr0 |= PCR0_RSTK;
#else
        pcr0 &= ~PCR0_RSTK;
#endif
#ifdef CPU_BTB_EN
        pcr0 |= PCR0_BTB;
#else
        pcr0 &= ~PCR0_BTB;
#endif
#ifdef CPU_LOOP_EN
        pcr0 |= PCR0_LOOP;
#else
        pcr0 &= ~PCR0_LOOP;
#endif

        /****************************************************************
         * WARNING: If you use a memory-mapped I/O device, don't set the
         * CPU_DISABLE_5X86_LSSER option, which may reorder memory-mapped
         * I/O accesses.
         * IF YOUR MOTHERBOARD HAS A PCI BUS, DON'T DISABLE LSSER.
         ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
        pcr0 &= ~PCR0_LSSER;
#else
        pcr0 |= PCR0_LSSER;
#endif
        write_cyrix_reg(PCR0, pcr0);

        /* Restore CCR3. */
        write_cyrix_reg(CCR3, ccr3);

        (void)read_cyrix_reg(0x80);     /* dummy */

        /* Unlock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
        load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0, NW = 1 */
        /* Lock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

        write_eflags(eflags);
}

#ifdef CPU_I486_ON_386
/*
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
 */
static void
init_i486_on_386(void)
{
        u_long eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
        need_post_dma_flush = 1;
#endif

        eflags = read_eflags();
        disable_intr();

        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0, NW = 0 */

        write_eflags(eflags);
}
#endif

/*
 * Cyrix 6x86
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86(void)
{
        u_long eflags;
        u_char ccr3, ccr4;

        eflags = read_eflags();
        disable_intr();

        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();

        /* Initialize CCR0. */
        write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

        /* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
        write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
        write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

        /* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

        ccr3 = read_cyrix_reg(CCR3);
        write_cyrix_reg(CCR3, CCR3_MAPEN0);

        /* Initialize CCR4. */
        ccr4 = read_cyrix_reg(CCR4);
        ccr4 |= CCR4_DTE;
        ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
        write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
        write_cyrix_reg(CCR4, ccr4 | 7);
#endif

        /* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
        write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

        /* Restore CCR3. */
        write_cyrix_reg(CCR3, ccr3);

        /* Unlock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

        /*
         * Earlier revisions of the 6x86 CPU can crash the system if the
         * L1 cache is in write-back mode.
         */
        if ((cyrix_did & 0xff00) > 0x1600)
                load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
        else {
                /* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
                load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
#else
                load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0 and NW = 1 */
#endif
        }

        /* Lock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

        write_eflags(eflags);
}
#endif /* I486_CPU */

#ifdef I686_CPU
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86MX(void)
{
        u_long eflags;
        u_char ccr3, ccr4;

        eflags = read_eflags();
        disable_intr();

        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();

        /* Initialize CCR0. */
        write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

        /* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
        write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
        write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

        /* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

        ccr3 = read_cyrix_reg(CCR3);
        write_cyrix_reg(CCR3, CCR3_MAPEN0);

        /* Initialize CCR4. */
        ccr4 = read_cyrix_reg(CCR4);
        ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
        write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
        write_cyrix_reg(CCR4, ccr4 | 7);
#endif

        /* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
        write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

        /* Restore CCR3. */
        write_cyrix_reg(CCR3, ccr3);

        /* Unlock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */

        /* Lock NW bit in CR0. */
        write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

        write_eflags(eflags);
}

static void
init_ppro(void)
{
        u_int64_t apicbase;

        /*
         * Local APIC should be disabled if it is not going to be used.
         */
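        /*
         * MSR_APICBASE is the IA32_APIC_BASE MSR; clearing its global
         * enable bit (APICBASE_ENABLED) keeps the local APIC disabled
         * until the next reset.
         */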
        apicbase = rdmsr(MSR_APICBASE);
        apicbase &= ~APICBASE_ENABLED;
        wrmsr(MSR_APICBASE, apicbase);
}

/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
        u_long eflags;
        u_int64_t bbl_cr_ctl3;

        eflags = read_eflags();
        disable_intr();

        load_cr0(rcr0() | CR0_CD | CR0_NW);
        wbinvd();

        bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);

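        /*
         * As used below, bit 0 of BBL_CR_CTL3 indicates that the L2
         * cache has already been configured, and bits 4:1 hold the L2
         * latency field; the magic constant supplies the rest of the
         * configuration.
         */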
        /* If the L2 cache is already configured, do nothing. */
        if (!(bbl_cr_ctl3 & 1)) {
                bbl_cr_ctl3 = 0x134052bLL;

                /* Set L2 Cache Latency (Default: 5). */
#ifdef CPU_L2_LATENCY
#if CPU_L2_LATENCY > 15
#error invalid CPU_L2_LATENCY.
#endif
                bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
#else
                bbl_cr_ctl3 |= 5 << 1;
#endif
                wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
        }

        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
        write_eflags(eflags);
#endif /* CPU_PPRO2CELERON */
}

/*
 * Initialize special VIA C3/C7 features.
 */
static void
init_via(void)
{
        u_int regs[4], val;
        u_int64_t msreg;

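        /*
         * CPUID leaf 0xc0000000 is the VIA/Centaur extended-leaf probe:
         * EAX returns the highest supported Centaur leaf.  Leaf
         * 0xc0000001 reports the PadLock feature flags in EDX.
         */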
        do_cpuid(0xc0000000, regs);
        val = regs[0];
        if (val >= 0xc0000001) {
                do_cpuid(0xc0000001, regs);
                val = regs[3];
        } else
                val = 0;

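        /*
         * The MSRs touched below are VIA model-specific registers:
         * 0x110B carries the RNG enable bit (0x40), and 0x1107 appears
         * to be the VIA feature control register, whose bit 28 gates
         * the PadLock crypto units.
         */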
        /* Enable RNG if present and disabled. */
        if (val & VIA_CPUID_HAS_RNG) {
                if (!(val & VIA_CPUID_DO_RNG)) {
                        msreg = rdmsr(0x110B);
                        msreg |= 0x40;
                        wrmsr(0x110B, msreg);
                }
                via_feature_rng = VIA_HAS_RNG;
        }
        /* Enable AES engine if present and disabled. */
        if (val & VIA_CPUID_HAS_ACE) {
                if (!(val & VIA_CPUID_DO_ACE)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_AES;
        }
        /* Enable ACE2 engine if present and disabled. */
        if (val & VIA_CPUID_HAS_ACE2) {
                if (!(val & VIA_CPUID_DO_ACE2)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_AESCTR;
        }
        /* Enable SHA engine if present and disabled. */
        if (val & VIA_CPUID_HAS_PHE) {
                if (!(val & VIA_CPUID_DO_PHE)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_SHA;
        }
        /* Enable MM engine if present and disabled. */
        if (val & VIA_CPUID_HAS_PMM) {
                if (!(val & VIA_CPUID_DO_PMM)) {
                        msreg = rdmsr(0x1107);
                        msreg |= (0x01 << 28);
                        wrmsr(0x1107, msreg);
                }
                via_feature_xcrypt |= VIA_HAS_MM;
        }
}

#endif /* I686_CPU */

/*
 * Initialize CR4 (Control register 4) to enable SSE instructions.
 */
void
enable_sse(void)
{
#if defined(CPU_ENABLE_SSE)
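        /*
         * CR4_FXSR (OSFXSR) declares that the OS uses fxsave/fxrstor,
         * and CR4_XMM (OSXMMEXCPT) enables unmasked SIMD floating-point
         * exceptions; both must be set before SSE instructions may be
         * executed.
         */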
        if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
                load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
                cpu_fxsr = hw_instruction_sse = 1;
        }
#endif
}

void
initializecpu(void)
{

        switch (cpu) {
#ifdef I486_CPU
        case CPU_BLUE:
                init_bluelightning();
                break;
        case CPU_486DLC:
                init_486dlc();
                break;
        case CPU_CY486DX:
                init_cy486dx();
                break;
        case CPU_M1SC:
                init_5x86();
                break;
#ifdef CPU_I486_ON_386
        case CPU_486:
                init_i486_on_386();
                break;
#endif
        case CPU_M1:
                init_6x86();
                break;
#endif /* I486_CPU */
#ifdef I686_CPU
        case CPU_M2:
                init_6x86MX();
                break;
        case CPU_686:
                if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
                        switch (cpu_id & 0xff0) {
                        case 0x610:
                                init_ppro();
                                break;
                        case 0x660:
                                init_mendocino();
                                break;
                        }
                } else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
#if defined(I686_CPU) && defined(CPU_ATHLON_SSE_HACK)
                        /*
                         * Sometimes the BIOS doesn't enable SSE instructions.
                         * According to AMD document 20734, the mobile
                         * Duron, the (mobile) Athlon 4 and the Athlon MP
                         * support SSE.  These correspond to cpu_id 0x66X,
                         * 0x67X or 0x68X.
                         */
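                        /*
                         * MSR 0xC0010015 is the AMD HWCR; clearing bit 15
                         * is understood to un-hide the SSE feature bit,
                         * after which CPUID leaf 1 is re-read to pick up
                         * CPUID_XMM.
                         */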
                        if ((cpu_feature & CPUID_XMM) == 0 &&
                            ((cpu_id & ~0xf) == 0x660 ||
                             (cpu_id & ~0xf) == 0x670 ||
                             (cpu_id & ~0xf) == 0x680)) {
                                u_int regs[4];
                                wrmsr(0xC0010015, rdmsr(0xC0010015) & ~0x08000);
                                do_cpuid(1, regs);
                                cpu_feature = regs[3];
                        }
#endif
                } else if (strcmp(cpu_vendor, "CentaurHauls") == 0) {
                        switch (cpu_id & 0xff0) {
                        case 0x690:
                                if ((cpu_id & 0xf) < 3)
                                        break;
                                /* fall through. */
                        case 0x6a0:
                        case 0x6d0:
                                init_via();
                                break;
                        default:
                                break;
                        }
                }
#ifdef PAE
                if ((amd_feature & AMDID_NX) != 0) {
                        uint64_t msr;

                        msr = rdmsr(MSR_EFER) | EFER_NXE;
                        wrmsr(MSR_EFER, msr);
                        pg_nx = PG_NX;
                }
#endif
                break;
#endif
        default:
                break;
        }
        enable_sse();

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
        /*
         * The OS must flush the L1 cache itself, because no PC-98 machine
         * handles cache coherency for non-Intel CPUs.  Use the wbinvd
         * instruction before a DMA transfer when need_pre_dma_flush = 1,
         * and the invd instruction after a DMA transfer when
         * need_post_dma_flush = 1.  If your CPU upgrade product supports
         * hardware cache control, you can add the CPU_UPGRADE_HW_CACHE
         * option to your kernel configuration file, which eliminates the
         * unneeded cache flush instruction(s).
         */
        if (strcmp(cpu_vendor, "CyrixInstead") == 0) {
                switch (cpu) {
#ifdef I486_CPU
                case CPU_486DLC:
                        need_post_dma_flush = 1;
                        break;
                case CPU_M1SC:
                        need_pre_dma_flush = 1;
                        break;
                case CPU_CY486DX:
                        need_pre_dma_flush = 1;
#ifdef CPU_I486_ON_386
                        need_post_dma_flush = 1;
#endif
                        break;
#endif
                default:
                        break;
                }
        } else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
                switch (cpu_id & 0xFF0) {
                case 0x470:             /* Enhanced Am486DX2 WB */
                case 0x490:             /* Enhanced Am486DX4 WB */
                case 0x4F0:             /* Am5x86 WB */
                        need_pre_dma_flush = 1;
                        break;
                }
        } else if (strcmp(cpu_vendor, "IBM") == 0) {
                need_post_dma_flush = 1;
        } else {
#ifdef CPU_I486_ON_386
                need_pre_dma_flush = 1;
#endif
        }
#endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
}

#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
/*
 * Enable the write allocate feature of AMD processors.
 * The following two functions require the Maxmem variable to be set.
 */
void
enable_K5_wt_alloc(void)
{
        u_int64_t msr;
        register_t savecrit;

        /*
         * Write allocate is supported only on models 1, 2, and 3, with
         * a stepping of 4 or greater.
         */
        if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
                savecrit = intr_disable();
                msr = rdmsr(0x83);              /* HWCR */
                wrmsr(0x83, msr & ~0x10);       /* disable write allocate */

                /*
                 * We have to tell the chip where the top of memory is,
                 * since video cards could have frame buffers there,
                 * memory-mapped I/O could be there, etc.
                 */
                if (Maxmem > 0)
                        msr = Maxmem / 16;
                else
                        msr = 0;
                msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
#ifdef PC98
                if (!(inb(0x43b) & 4)) {
                        wrmsr(0x86, 0x0ff00f0);
                        msr |= AMD_WT_ALLOC_PRE;
                }
#else
                /*
                 * There is no way to know whether the 15-16M hole exists
                 * or not.  Therefore, we disable write allocate for this
                 * range.
                 */
                wrmsr(0x86, 0x0ff00f0);
                msr |= AMD_WT_ALLOC_PRE;
#endif
                wrmsr(0x85, msr);

                msr = rdmsr(0x83);
                wrmsr(0x83, msr | 0x10);        /* enable write allocate */
                intr_restore(savecrit);
        }
}

void
enable_K6_wt_alloc(void)
{
        quad_t size;
        u_int64_t whcr;
        u_long eflags;

        eflags = read_eflags();
        disable_intr();
        wbinvd();

#ifdef CPU_DISABLE_CACHE
        /*
         * Certain K6-2 boxes become unstable when write allocation is
         * enabled.
         */
        /*
         * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
         * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
         * All other bits in TR12 have no effect on the processor's operation.
         * The I/O Trap Restart function (bit 9 of TR12) is always enabled
         * on the AMD-K6.
         */
        wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
        /* Don't assume that memory size is aligned on a 4M boundary. */
        if (Maxmem > 0)
                size = ((Maxmem >> 8) + 3) >> 2;
        else
                size = 0;

        /* Limit is 508M bytes. */
        if (size > 0x7f)
                size = 0x7f;
        whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);
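        /*
         * In the K6 WHCR layout used above, bits 7:1 hold the
         * write-allocate limit in 4M units and bit 0 enables write
         * allocation for the 15-16M hole.
         */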

#if defined(PC98) || defined(NO_MEMORY_HOLE)
        if (whcr & (0x7fLL << 1)) {
#ifdef PC98
                /*
                 * If bit 2 of port 0x43b is 0, disable write allocate
                 * for the 15-16M range.
                 */
                if (!(inb(0x43b) & 4))
                        whcr &= ~0x0001LL;
                else
#endif
                        whcr |= 0x0001LL;
        }
#else
        /*
         * There is no way to know whether the 15-16M hole exists or not.
         * Therefore, we disable write allocate for this range.
         */
        whcr &= ~0x0001LL;
#endif
        wrmsr(0xc0000082, whcr);

        write_eflags(eflags);
}

void
enable_K6_2_wt_alloc(void)
{
        quad_t size;
        u_int64_t whcr;
        u_long eflags;

        eflags = read_eflags();
        disable_intr();
        wbinvd();

#ifdef CPU_DISABLE_CACHE
        /*
         * Certain K6-2 boxes become unstable when write allocation is
         * enabled.
         */
        /*
         * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
         * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
         * All other bits in TR12 have no effect on the processor's operation.
         * The I/O Trap Restart function (bit 9 of TR12) is always enabled
         * on the AMD-K6.
         */
        wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
        /* Don't assume that memory size is aligned on a 4M boundary. */
        if (Maxmem > 0)
                size = ((Maxmem >> 8) + 3) >> 2;
        else
                size = 0;

        /* Limit is 4092M bytes. */
        if (size > 0x3ff)
                size = 0x3ff;
        whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);
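        /*
         * On the K6-2 and later steppings, the WHCR layout differs from
         * the plain K6: bits 31:22 hold the write-allocate limit in 4M
         * units and bit 16 enables write allocation for the 15-16M hole.
         */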

#if defined(PC98) || defined(NO_MEMORY_HOLE)
        if (whcr & (0x3ffLL << 22)) {
#ifdef PC98
                /*
                 * If bit 2 of port 0x43b is 0, disable write allocate
                 * for the 15-16M range.
                 */
                if (!(inb(0x43b) & 4))
                        whcr &= ~(1LL << 16);
                else
#endif
                        whcr |= 1LL << 16;
        }
#else
        /*
         * There is no way to know whether the 15-16M hole exists or not.
         * Therefore, we disable write allocate for this range.
         */
        whcr &= ~(1LL << 16);
#endif
        wrmsr(0xc0000082, whcr);

        write_eflags(eflags);
}
#endif /* I586_CPU && CPU_WT_ALLOC */

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(cyrixreg, cyrixreg)
{
        u_long eflags;
        u_int cr0;
        u_char ccr1, ccr2, ccr3;
        u_char ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;

        cr0 = rcr0();
        if (strcmp(cpu_vendor, "CyrixInstead") == 0) {
                eflags = read_eflags();
                disable_intr();

                if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
                        ccr0 = read_cyrix_reg(CCR0);
                }
                ccr1 = read_cyrix_reg(CCR1);
                ccr2 = read_cyrix_reg(CCR2);
                ccr3 = read_cyrix_reg(CCR3);
                if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
                        write_cyrix_reg(CCR3, CCR3_MAPEN0);
                        ccr4 = read_cyrix_reg(CCR4);
                        if ((cpu == CPU_M1) || (cpu == CPU_M2))
                                ccr5 = read_cyrix_reg(CCR5);
                        else
                                pcr0 = read_cyrix_reg(PCR0);
                        write_cyrix_reg(CCR3, ccr3);    /* Restore CCR3. */
                }
                write_eflags(eflags);

                if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
                        printf("CCR0=%x, ", (u_int)ccr0);

                printf("CCR1=%x, CCR2=%x, CCR3=%x",
                    (u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
                if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
                        printf(", CCR4=%x, ", (u_int)ccr4);
                        if (cpu == CPU_M1SC)
                                printf("PCR0=%x\n", pcr0);
                        else
                                printf("CCR5=%x\n", ccr5);
                }
        }
        printf("CR0=%x\n", cr0);
}
#endif /* DDB */