/*-
 * Copyright (c) KATO Takenori, 1997, 1998.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.0/sys/i386/i386/initcpu.c 197744 2009-10-04 12:20:59Z kib $");

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define	CPU_ENABLE_SSE
#endif
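/*
 * The CPU_DISABLE_SSE and I686_CPU symbols tested above come from the
 * kernel configuration by way of the generated opt_cpu.h header included
 * earlier, so "options CPU_DISABLE_SSE" in a config file turns this off.
 */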

void initializecpu(void);
#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
void	enable_K5_wt_alloc(void);
void	enable_K6_wt_alloc(void);
void	enable_K6_2_wt_alloc(void);
#endif

#ifdef I486_CPU
static void init_5x86(void);
static void init_bluelightning(void);
static void init_486dlc(void);
static void init_cy486dx(void);
#ifdef CPU_I486_ON_386
static void init_i486_on_386(void);
#endif
static void init_6x86(void);
#endif /* I486_CPU */

#ifdef I686_CPU
static void	init_6x86MX(void);
static void	init_ppro(void);
static void	init_mendocino(void);
#endif

static int	hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
/* Must *NOT* be BSS or locore will bzero these after setting them */
int	cpu = 0;		/* Are we 386, 386sx, 486, etc? */
u_int	cpu_feature = 0;	/* Feature flags */
u_int	cpu_feature2 = 0;	/* Feature flags */
u_int	amd_feature = 0;	/* AMD feature flags */
u_int	amd_feature2 = 0;	/* AMD feature flags */
u_int	amd_pminfo = 0;		/* AMD advanced power management info */
u_int	via_feature_rng = 0;	/* VIA RNG features */
u_int	via_feature_xcrypt = 0;	/* VIA ACE features */
u_int	cpu_high = 0;		/* Highest arg to CPUID */
u_int	cpu_id = 0;		/* Stepping ID */
u_int	cpu_procinfo = 0;	/* HyperThreading Info / Brand Index / CLFLUSH */
u_int	cpu_procinfo2 = 0;	/* Multicore info */
char	cpu_vendor[20] = "";	/* CPU Origin code */
u_int	cpu_vendor_id = 0;	/* CPU vendor ID */
u_int	cpu_clflush_line_size = 32;

SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
    &via_feature_rng, 0, "VIA C3/C7 RNG feature available in CPU");
SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD,
    &via_feature_xcrypt, 0, "VIA C3/C7 xcrypt feature available in CPU");
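/*
 * The read-only nodes above can be inspected from userland with sysctl(8),
 * e.g. "sysctl hw.via_feature_rng", or programmatically along these lines:
 *
 *	int val;
 *	size_t len = sizeof(val);
 *
 *	if (sysctlbyname("hw.instruction_sse", &val, &len, NULL, 0) == 0)
 *		printf("SSE %savailable\n", val ? "" : "not ");
 */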

#ifdef CPU_ENABLE_SSE
u_int	cpu_fxsr;		/* SSE enabled */
u_int	cpu_mxcsr_mask;		/* valid bits in mxcsr */
#endif

#ifdef I486_CPU
/*
 * IBM Blue Lightning
 */
static void
init_bluelightning(void)
{
	u_long	eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
	wrmsr(0x1000, 0x9c92LL);	/* FP operand can be cacheable on Cyrix FPU */
#else
	wrmsr(0x1000, 0x1c92LL);	/* Intel FPU */
#endif
	/* Enables 13MB and 0-640KB cache. */
	wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
	wrmsr(0x1002, 0x04000000LL);	/* Enables triple-clock mode. */
#else
	wrmsr(0x1002, 0x03000000LL);	/* Enables double-clock mode. */
#endif

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
	write_eflags(eflags);
}

/*
 * Cyrix 486SLC/DLC/SR/DR series
 */
static void
init_486dlc(void)
{
	u_long	eflags;
	u_char	ccr0;

	eflags = read_eflags();
	disable_intr();
	invd();

	ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
	write_cyrix_reg(CCR0, ccr0);
	invd();
#else
	ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
	ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
	ccr0 |= CCR0_CO;		/* Direct mapped mode. */
#endif
	write_cyrix_reg(CCR0, ccr0);

	/* Clear non-cacheable region. */
	write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

	write_cyrix_reg(0, 0);	/* dummy write */

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
#endif /* !CYRIX_CACHE_WORKS */
	write_eflags(eflags);
}


/*
 * Cyrix 486S/DX series
 */
static void
init_cy486dx(void)
{
	u_long	eflags;
	u_char	ccr2;

	eflags = read_eflags();
	disable_intr();
	invd();

	ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#endif

#ifdef PC98
	/* Enables WB cache interface pin and Lock NW bit in CR0. */
	ccr2 |= CCR2_WB | CCR2_LOCK_NW;
	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
#endif

	write_cyrix_reg(CCR2, ccr2);
	write_eflags(eflags);
}


/*
 * Cyrix 5x86
 */
static void
init_5x86(void)
{
	u_long	eflags;
	u_char	ccr2, ccr3, ccr4, pcr0;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	(void)read_cyrix_reg(CCR3);	/* dummy */

	/* Initialize CCR2. */
	ccr2 = read_cyrix_reg(CCR2);
	ccr2 |= CCR2_WB;
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#else
	ccr2 &= ~CCR2_SUSP_HLT;
#endif
	ccr2 |= CCR2_WT1;
	write_cyrix_reg(CCR2, ccr2);

	/* Initialize CCR4. */
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 |= CCR4_MEM;
#ifdef CPU_FASTER_5X86_FPU
	ccr4 |= CCR4_FASTFPE;
#else
	ccr4 &= ~CCR4_FASTFPE;
#endif
	ccr4 &= ~CCR4_IOMASK;
	/********************************************************************
	 * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time
	 * should be 0 for errata fix.
	 ********************************************************************/
#ifdef CPU_IORT
	ccr4 |= CPU_IORT & CCR4_IOMASK;
#endif
	write_cyrix_reg(CCR4, ccr4);

	/* Initialize PCR0. */
	/****************************************************************
	 * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
	 * BTB_EN might make your system unstable.
	 ****************************************************************/
	pcr0 = read_cyrix_reg(PCR0);
#ifdef CPU_RSTK_EN
	pcr0 |= PCR0_RSTK;
#else
	pcr0 &= ~PCR0_RSTK;
#endif
#ifdef CPU_BTB_EN
	pcr0 |= PCR0_BTB;
#else
	pcr0 &= ~PCR0_BTB;
#endif
#ifdef CPU_LOOP_EN
	pcr0 |= PCR0_LOOP;
#else
	pcr0 &= ~PCR0_LOOP;
#endif

	/****************************************************************
	 * WARNING: if you use a memory mapped I/O device, don't use
	 * DISABLE_5X86_LSSER option, which may reorder memory mapped
	 * I/O access.
	 * IF YOUR MOTHERBOARD HAS PCI BUS, DON'T DISABLE LSSER.
	 ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
	pcr0 &= ~PCR0_LSSER;
#else
	pcr0 |= PCR0_LSSER;
#endif
	write_cyrix_reg(PCR0, pcr0);

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	(void)read_cyrix_reg(0x80);	/* dummy */

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}

#ifdef CPU_I486_ON_386
/*
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
 */
static void
init_i486_on_386(void)
{
	u_long	eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */

	write_eflags(eflags);
}
#endif

/*
 * Cyrix 6x86
 *
 * XXX - What should I do here? Please let me know.
 */
static void
init_6x86(void)
{
	u_long	eflags;
	u_char	ccr3, ccr4;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	/*
	 * Earlier revisions of the 6x86 CPU could crash the system if
	 * the L1 cache is in write-back mode.
	 */
	if ((cyrix_did & 0xff00) > 0x1600)
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	else {
		/* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
#else
		load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0 and NW = 1 */
#endif
	}

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
#endif /* I486_CPU */

#ifdef I686_CPU
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * XXX - What should I do here? Please let me know.
 */
static void
init_6x86MX(void)
{
	u_long	eflags;
	u_char	ccr3, ccr4;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}

static void
init_ppro(void)
{
	u_int64_t	apicbase;

	/*
	 * Local APIC should be disabled if it is not going to be used.
	 */
	apicbase = rdmsr(MSR_APICBASE);
	apicbase &= ~APICBASE_ENABLED;
	wrmsr(MSR_APICBASE, apicbase);
}

/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
	u_long	eflags;
	u_int64_t	bbl_cr_ctl3;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);

	/* If the L2 cache is configured, do nothing. */
	if (!(bbl_cr_ctl3 & 1)) {
		bbl_cr_ctl3 = 0x134052bLL;

		/* Set L2 Cache Latency (Default: 5). */
#ifdef	CPU_CELERON_L2_LATENCY
#if CPU_CELERON_L2_LATENCY > 15
#error invalid CPU_CELERON_L2_LATENCY.
#endif
		bbl_cr_ctl3 |= CPU_CELERON_L2_LATENCY << 1;
#else
		bbl_cr_ctl3 |= 5 << 1;
#endif
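		/*
		 * As used above, bit 0 of BBL_CR_CTL3 is the "L2 configured"
		 * flag tested at the top of this block, and the four-bit
		 * latency value occupies bits 4:1, hence the shift left by
		 * one before the register is written back.
		 */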
		wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
	}

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
	write_eflags(eflags);
#endif /* CPU_PPRO2CELERON */
}

/*
 * Initialize special VIA C3/C7 features
 */
static void
init_via(void)
{
	u_int regs[4], val;
	u_int64_t msreg;

	do_cpuid(0xc0000000, regs);
	val = regs[0];
	if (val >= 0xc0000001) {
		do_cpuid(0xc0000001, regs);
		val = regs[3];
	} else
		val = 0;

	/* Enable RNG if present and disabled */
	if (val & VIA_CPUID_HAS_RNG) {
		if (!(val & VIA_CPUID_DO_RNG)) {
			msreg = rdmsr(0x110B);
			msreg |= 0x40;
			wrmsr(0x110B, msreg);
		}
		via_feature_rng = VIA_HAS_RNG;
	}
	/* Enable AES engine if present and disabled */
	if (val & VIA_CPUID_HAS_ACE) {
		if (!(val & VIA_CPUID_DO_ACE)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		}
		via_feature_xcrypt |= VIA_HAS_AES;
	}
	/* Enable ACE2 engine if present and disabled */
	if (val & VIA_CPUID_HAS_ACE2) {
		if (!(val & VIA_CPUID_DO_ACE2)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		}
		via_feature_xcrypt |= VIA_HAS_AESCTR;
	}
	/* Enable SHA engine if present and disabled */
	if (val & VIA_CPUID_HAS_PHE) {
		if (!(val & VIA_CPUID_DO_PHE)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		}
		via_feature_xcrypt |= VIA_HAS_SHA;
	}
	/* Enable MM engine if present and disabled */
	if (val & VIA_CPUID_HAS_PMM) {
		if (!(val & VIA_CPUID_DO_PMM)) {
			msreg = rdmsr(0x1107);
			msreg |= (0x01 << 28);
			wrmsr(0x1107, msreg);
		}
		via_feature_xcrypt |= VIA_HAS_MM;
	}
}

#endif /* I686_CPU */

/*
 * Initialize CR4 (Control register 4) to enable SSE instructions.
 */
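/*
 * CR4_FXSR (CR4.OSFXSR) announces that the kernel saves FPU/SSE state with
 * fxsave/fxrstor, and CR4_XMM (CR4.OSXMMEXCPT) unmasks the #XM SIMD
 * floating-point exception; the Intel manuals require both bits to be set
 * before SSE instructions may be executed.
 */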
void
enable_sse(void)
{
#if defined(CPU_ENABLE_SSE)
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}
#endif
}

void
initializecpu(void)
{

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		if (cpu_vendor_id == CPU_VENDOR_INTEL) {
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
		} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
#if defined(I686_CPU) && defined(CPU_ATHLON_SSE_HACK)
			/*
			 * Sometimes the BIOS doesn't enable SSE instructions.
			 * According to AMD document 20734, the mobile
			 * Duron, the (mobile) Athlon 4 and the Athlon MP
			 * support SSE. These correspond to cpu_id 0x66X
			 * or 0x67X.
			 */
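			/*
			 * MSR 0xC0010015 is the K7 HWCR register; the bit
			 * cleared below (0x8000) is reported to mask the SSE
			 * feature flag, so clearing it and re-reading CPUID
			 * leaf 1 makes the flag visible again.
			 */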
			if ((cpu_feature & CPUID_XMM) == 0 &&
			    ((cpu_id & ~0xf) == 0x660 ||
			     (cpu_id & ~0xf) == 0x670 ||
			     (cpu_id & ~0xf) == 0x680)) {
				u_int regs[4];
				wrmsr(0xC0010015, rdmsr(0xC0010015) & ~0x08000);
				do_cpuid(1, regs);
				cpu_feature = regs[3];
			}
#endif
		} else if (cpu_vendor_id == CPU_VENDOR_CENTAUR) {
			switch (cpu_id & 0xff0) {
			case 0x690:
				if ((cpu_id & 0xf) < 3)
					break;
				/* fall through. */
			case 0x6a0:
			case 0x6d0:
			case 0x6f0:
				init_via();
				break;
			default:
				break;
			}
		}
#ifdef PAE
		if ((amd_feature & AMDID_NX) != 0) {
			uint64_t msr;

			msr = rdmsr(MSR_EFER) | EFER_NXE;
			wrmsr(MSR_EFER, msr);
			pg_nx = PG_NX;
		}
#endif
		break;
#endif
	default:
		break;
	}
	enable_sse();

	/*
	 * CPUID with %eax = 1, %ebx returns
	 * Bits 15-8: CLFLUSH line size
	 *	(Value * 8 = cache line size in bytes)
	 */
	if ((cpu_feature & CPUID_CLFSH) != 0)
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
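	/*
	 * For example, a CLFLUSH field of 8 (typical for Pentium 4 and
	 * later parts) gives 8 * 8 = 64, i.e. a 64-byte cache line.
	 */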
	/*
	 * XXXKIB: (temporary) hack to work around traps generated when
	 * CLFLUSHing APIC registers window.
	 */
	if (cpu_vendor_id == CPU_VENDOR_INTEL && !(cpu_feature & CPUID_SS))
		cpu_feature &= ~CPUID_CLFSH;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	/*
	 * The OS should flush the L1 cache by itself because no PC-98
	 * machine supports non-Intel CPUs.  Use the wbinvd instruction
	 * before a DMA transfer when need_pre_dma_flush = 1, and the invd
	 * instruction after a DMA transfer when need_post_dma_flush = 1.
	 * If your CPU upgrade product supports hardware cache control, you
	 * can add the CPU_UPGRADE_HW_CACHE option to your kernel
	 * configuration file.  This option eliminates unneeded cache flush
	 * instruction(s).
	 */
	if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
		switch (cpu) {
#ifdef I486_CPU
		case CPU_486DLC:
			need_post_dma_flush = 1;
			break;
		case CPU_M1SC:
			need_pre_dma_flush = 1;
			break;
		case CPU_CY486DX:
			need_pre_dma_flush = 1;
#ifdef CPU_I486_ON_386
			need_post_dma_flush = 1;
#endif
			break;
#endif
		default:
			break;
		}
	} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
		switch (cpu_id & 0xFF0) {
		case 0x470:		/* Enhanced Am486DX2 WB */
		case 0x490:		/* Enhanced Am486DX4 WB */
		case 0x4F0:		/* Am5x86 WB */
			need_pre_dma_flush = 1;
			break;
		}
	} else if (cpu_vendor_id == CPU_VENDOR_IBM) {
		need_post_dma_flush = 1;
	} else {
#ifdef CPU_I486_ON_386
		need_pre_dma_flush = 1;
#endif
	}
#endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
}

#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
/*
 * Enable the write allocate feature of AMD processors.
 * The following functions require the Maxmem variable to be set.
 */
void
enable_K5_wt_alloc(void)
{
	u_int64_t	msr;
	register_t	savecrit;

	/*
	 * Write allocate is supported only on models 1, 2, and 3, with
	 * a stepping of 4 or greater.
	 */
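	/*
	 * Both fields come from cpu_id: e.g. cpu_id 0x514 (family 5,
	 * model 1, stepping 4) gives (cpu_id & 0xf0) = 0x10 > 0 and
	 * (cpu_id & 0x0f) = 4 > 3, so write allocate is enabled below.
	 */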
	if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
		savecrit = intr_disable();
		msr = rdmsr(0x83);		/* HWCR */
		wrmsr(0x83, msr & ~0x10);	/* disable write allocate */

		/*
		 * We have to tell the chip where the top of memory is,
		 * since video cards could have frame buffers there,
		 * memory-mapped I/O could be there, etc.
		 */
		if (Maxmem > 0)
			msr = Maxmem / 16;
		else
			msr = 0;
		msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
#ifdef PC98
		if (!(inb(0x43b) & 4)) {
			wrmsr(0x86, 0x0ff00f0);
			msr |= AMD_WT_ALLOC_PRE;
		}
#else
		/*
		 * There is no way to know whether the 15-16M hole exists
		 * or not.  Therefore, we disable write allocate for this
		 * range.
		 */
		wrmsr(0x86, 0x0ff00f0);
		msr |= AMD_WT_ALLOC_PRE;
#endif
		wrmsr(0x85, msr);

		msr = rdmsr(0x83);
		wrmsr(0x83, msr | 0x10);	/* enable write allocate */
		intr_restore(savecrit);
	}
}

void
enable_K6_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	u_long	eflags;

	eflags = read_eflags();
	disable_intr();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned with 4M. */
	if (Maxmem > 0)
		size = ((Maxmem >> 8) + 3) >> 2;
	else
		size = 0;

	/* Limit is 508M bytes. */
	if (size > 0x7f)
		size = 0x7f;
	whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);
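	/*
	 * Maxmem is the top of physical memory in 4 KB pages, so Maxmem >> 8
	 * is the size in megabytes and the (+3) >> 2 above rounds it up to
	 * 4 MB units; WHCR (MSR 0xc0000082) on the K6 keeps this limit in
	 * bits 7:1, which is why it is masked with 0x7f and shifted by one.
	 */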

#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x7fLL << 1)) {
#ifdef PC98
		/*
		 * If bit 2 of port 0x43b is 0, disable write allocate for the
		 * 15-16M range.
		 */
		if (!(inb(0x43b) & 4))
			whcr &= ~0x0001LL;
		else
#endif
			whcr |= 0x0001LL;
	}
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	 */
	whcr &= ~0x0001LL;
#endif
	wrmsr(0x0c0000082, whcr);

	write_eflags(eflags);
}

void
enable_K6_2_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	u_long	eflags;

	eflags = read_eflags();
	disable_intr();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned with 4M. */
	if (Maxmem > 0)
		size = ((Maxmem >> 8) + 3) >> 2;
	else
		size = 0;

	/* Limit is 4092M bytes. */
	if (size > 0x3ff)
		size = 0x3ff;
	whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);

#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x3ffLL << 22)) {
#ifdef PC98
		/*
		 * If bit 2 of port 0x43b is 0, disable write allocate for the
		 * 15-16M range.
		 */
		if (!(inb(0x43b) & 4))
			whcr &= ~(1LL << 16);
		else
#endif
			whcr |= 1LL << 16;
	}
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	 */
	whcr &= ~(1LL << 16);
#endif
	wrmsr(0x0c0000082, whcr);

	write_eflags(eflags);
}
#endif /* I586_CPU && CPU_WT_ALLOC */

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(cyrixreg, cyrixreg)
{
	u_long	eflags;
	u_int	cr0;
	u_char	ccr1, ccr2, ccr3;
	u_char	ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;

	cr0 = rcr0();
	if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
		eflags = read_eflags();
		disable_intr();

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
			ccr0 = read_cyrix_reg(CCR0);
		}
		ccr1 = read_cyrix_reg(CCR1);
		ccr2 = read_cyrix_reg(CCR2);
		ccr3 = read_cyrix_reg(CCR3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			write_cyrix_reg(CCR3, CCR3_MAPEN0);
			ccr4 = read_cyrix_reg(CCR4);
			if ((cpu == CPU_M1) || (cpu == CPU_M2))
				ccr5 = read_cyrix_reg(CCR5);
			else
				pcr0 = read_cyrix_reg(PCR0);
			write_cyrix_reg(CCR3, ccr3);	/* Restore CCR3. */
		}
		write_eflags(eflags);

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
			printf("CCR0=%x, ", (u_int)ccr0);

		printf("CCR1=%x, CCR2=%x, CCR3=%x",
		    (u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			printf(", CCR4=%x, ", (u_int)ccr4);
			if (cpu == CPU_M1SC)
				printf("PCR0=%x\n", pcr0);
			else
				printf("CCR5=%x\n", ccr5);
		}
	}
	printf("CR0=%x\n", cr0);
}
#endif /* DDB */