1 /*
2 * Copyright (c) KATO Takenori, 1997, 1998.
3 *
4 * All rights reserved. Unpublished rights reserved under the copyright
5 * laws of Japan.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer as
13 * the first lines of this file unmodified.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * $FreeBSD$
30 */
31
32 #include "opt_cpu.h"
33 #include "opt_failsafe.h"
34
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/systm.h>
38
39 #include <machine/cputypes.h>
40 #include <machine/md_var.h>
41 #include <machine/specialreg.h>
42
/* Public entry point, called from machine-dependent startup code. */
void initializecpu(void);
#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
/* AMD write-allocate enablers (definitions below); require Maxmem set. */
void enable_K5_wt_alloc(void);
void enable_K6_wt_alloc(void);
void enable_K6_2_wt_alloc(void);
#endif

#ifdef I486_CPU
/* Per-CPU-type initialization routines for 486-class processors. */
static void init_5x86(void);
static void init_bluelightning(void);
static void init_486dlc(void);
static void init_cy486dx(void);
#ifdef CPU_I486_ON_386
static void init_i486_on_386(void);
#endif
static void init_6x86(void);
#endif /* I486_CPU */

#ifdef I686_CPU
/* Per-CPU-type initialization routines for 686-class processors. */
static void init_6x86MX(void);
static void init_ppro(void);
#endif
65
66 #ifdef I486_CPU
/*
 * IBM Blue Lightning
 *
 * Programs the Blue Lightning-specific MSRs (0x1000-0x1002) to enable
 * the on-chip cache and select the clock-multiplier mode.  Interrupts
 * are disabled and the cache is flushed/disabled around the MSR writes.
 */
static void
init_bluelightning(void)
{
	u_long	eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	/* PC-98 DMA is not cache-coherent; request post-DMA invalidation. */
	need_post_dma_flush = 1;
#endif

	eflags = read_eflags();
	disable_intr();

	/* Disable caching (CD=1, NW=1) while reprogramming the cache MSRs. */
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
	wrmsr(0x1000, 0x9c92LL);	/* FP operand can be cacheable on Cyrix FPU */
#else
	wrmsr(0x1000, 0x1c92LL);	/* Intel FPU */
#endif
	/* Enables 13MB and 0-640KB cache. */
	wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
	wrmsr(0x1002, 0x04000000LL);	/* Enables triple-clock mode. */
#else
	wrmsr(0x1002, 0x03000000LL);	/* Enables double-clock mode. */
#endif

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
	write_eflags(eflags);
}
103
/*
 * Cyrix 486SLC/DLC/SR/DR series
 *
 * Configures the Cyrix configuration registers (CCR0, NCR1-NCR4).
 * Unless CYRIX_CACHE_WORKS is defined, the on-chip cache is left
 * effectively disabled (NC1 non-cacheable region + BARB).
 */
static void
init_486dlc(void)
{
	u_long	eflags;
	u_char	ccr0;

	eflags = read_eflags();
	disable_intr();
	invd();

	ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
	/* Cache is considered unreliable: keep regions non-cacheable. */
	ccr0 |= CCR0_NC1 | CCR0_BARB;
	write_cyrix_reg(CCR0, ccr0);
	invd();
#else
	ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
	ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
	ccr0 |= CCR0_CO;	/* Direct mapped mode. */
#endif
	write_cyrix_reg(CCR0, ccr0);

	/* Clear non-cacheable region. */
	write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

	write_cyrix_reg(0, 0);	/* dummy write */

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
#endif /* !CYRIX_CACHE_WORKS */
	write_eflags(eflags);
}
148
149
/*
 * Cyrix 486S/DX series
 *
 * Optionally enables the suspend-on-HLT power-saving feature via CCR2.
 */
static void
init_cy486dx(void)
{
	u_long	eflags;
	u_char	ccr2;

	eflags = read_eflags();
	disable_intr();
	invd();

	ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;	/* enter suspend mode on HLT instruction */
#endif
	write_cyrix_reg(CCR2, ccr2);
	write_eflags(eflags);
}
170
171
/*
 * Cyrix 5x86
 *
 * Programs CCR2/CCR4/PCR0 for write-back caching, optional fast FPU
 * exception handling, and the performance-control bits.  Extended
 * registers (CCR4, PCR0) are only visible while CCR3.MAPEN0 is set,
 * hence the CCR3 save/restore bracketing below.  The dummy reads of
 * Cyrix registers appear to be required sequencing; do not remove.
 */
static void
init_5x86(void)
{
	u_long	eflags;
	u_char	ccr2, ccr3, ccr4, pcr0;

	eflags = read_eflags();
	disable_intr();

	/* Disable caching while reconfiguring. */
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	(void)read_cyrix_reg(CCR3);	/* dummy */

	/* Initialize CCR2. */
	ccr2 = read_cyrix_reg(CCR2);
	ccr2 |= CCR2_WB;	/* enable write-back cache mode */
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#else
	ccr2 &= ~CCR2_SUSP_HLT;
#endif
	ccr2 |= CCR2_WT1;
	write_cyrix_reg(CCR2, ccr2);

	/* Initialize CCR4.  Extended registers require CCR3.MAPEN0. */
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 |= CCR4_MEM;
#ifdef CPU_FASTER_5X86_FPU
	ccr4 |= CCR4_FASTFPE;
#else
	ccr4 &= ~CCR4_FASTFPE;
#endif
	ccr4 &= ~CCR4_IOMASK;
	/********************************************************************
	 * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time
	 * should be 0 for errata fix.
	 ********************************************************************/
#ifdef CPU_IORT
	ccr4 |= CPU_IORT & CCR4_IOMASK;
#endif
	write_cyrix_reg(CCR4, ccr4);

	/* Initialize PCR0. */
	/****************************************************************
	 * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
	 * BTB_EN might make your system unstable.
	 ****************************************************************/
	pcr0 = read_cyrix_reg(PCR0);
#ifdef CPU_RSTK_EN
	pcr0 |= PCR0_RSTK;
#else
	pcr0 &= ~PCR0_RSTK;
#endif
#ifdef CPU_BTB_EN
	pcr0 |= PCR0_BTB;
#else
	pcr0 &= ~PCR0_BTB;
#endif
#ifdef CPU_LOOP_EN
	pcr0 |= PCR0_LOOP;
#else
	pcr0 &= ~PCR0_LOOP;
#endif

	/****************************************************************
	 * WARNING: if you use a memory mapped I/O device, don't use
	 * DISABLE_5X86_LSSER option, which may reorder memory mapped
	 * I/O access.
	 * IF YOUR MOTHERBOARD HAS PCI BUS, DON'T DISABLE LSSER.
	 ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
	pcr0 &= ~PCR0_LSSER;
#else
	pcr0 |= PCR0_LSSER;
#endif
	write_cyrix_reg(PCR0, pcr0);

	/* Restore CCR3 (hides the extended registers again). */
	write_cyrix_reg(CCR3, ccr3);

	(void)read_cyrix_reg(0x80);	/* dummy */

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
270
271 #ifdef CPU_I486_ON_386
/*
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
 */
276 void
277 init_i486_on_386(void)
278 {
279 u_long eflags;
280
281 #if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
282 need_post_dma_flush = 1;
283 #endif
284
285 eflags = read_eflags();
286 disable_intr();
287
288 load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); /* CD = 0, NW = 0 */
289
290 write_eflags(eflags);
291 }
292 #endif
293
/*
 * Cyrix 6x86
 *
 * XXX - What should I do here?  Please let me know.
 *
 * Configures CCR0-CCR5 and chooses the L1 cache mode based on the
 * chip revision (cyrix_did): early revisions may crash with the cache
 * in write-back mode, so they run write-through unless
 * CYRIX_CACHE_REALLY_WORKS is defined.
 */
static void
init_6x86(void)
{
	u_long	eflags;
	u_char	ccr3, ccr4;

	eflags = read_eflags();
	disable_intr();

	/* Disable caching while reconfiguring. */
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
#ifdef FAILSAFE
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	/* Extended registers (CCR4/CCR5) require CCR3.MAPEN0. */
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);	/* default I/O recovery time */
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	/*
	 * Earlier revision of the 6x86 CPU could crash the system if
	 * L1 cache is in write-back mode.
	 */
	if ((cyrix_did & 0xff00) > 0x1600)
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	else {
		/* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
#else
		load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0 and NW = 1 */
#endif
	}

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
374 #endif /* I486_CPU */
375
376 #ifdef I686_CPU
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * XXX - What should I do here?  Please let me know.
 *
 * Same CCR0-CCR5 setup as init_6x86(), but the M2 always runs with
 * the L1 cache fully enabled (CD = 0, NW = 0).
 */
static void
init_6x86MX(void)
{
	u_long	eflags;
	u_char	ccr3, ccr4;

	eflags = read_eflags();
	disable_intr();

	/* Disable caching while reconfiguring. */
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
#ifdef FAILSAFE
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	/* Extended registers (CCR4/CCR5) require CCR3.MAPEN0. */
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);	/* default I/O recovery time */
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	/* Enable caching. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
443
/* Pentium Pro: disable the local APIC when running a UP kernel. */
static void
init_ppro(void)
{
#ifndef SMP
	u_int64_t	apicbase;

	/*
	 * Local APIC should be disabled in UP kernel.
	 */
	apicbase = rdmsr(0x1b);		/* MSR 0x1b: APIC base register */
	apicbase &= ~0x800LL;		/* clear APIC global enable (bit 11) */
	wrmsr(0x1b, apicbase);
#endif
}
458 #endif /* I686_CPU */
459
/*
 * Dispatch to the per-CPU-type initialization routine based on the
 * `cpu' class determined earlier, then (PC-98 only) decide whether
 * DMA transfers need explicit cache flushes on this CPU.
 */
void
initializecpu(void)
{

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		/* Only Pentium Pro (family 6, model 1) needs the APIC fix. */
		if (strcmp(cpu_vendor, "GenuineIntel") == 0 &&
			(cpu_id & 0xff0) == 0x610)
			init_ppro();
		break;
#endif
	default:
		break;
	}

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	/*
	 * OS should flush L1 cache by itself because no PC-98 supports
	 * non-Intel CPUs.  Use wbinvd instruction before DMA transfer
	 * when need_pre_dma_flush = 1, use invd instruction after DMA
	 * transfer when need_post_dma_flush = 1.  If your CPU upgrade
	 * product supports hardware cache control, you can add the
	 * CPU_UPGRADE_HW_CACHE option in your kernel configuration file.
	 * This option eliminates unneeded cache flush instructions.
	 */
	if (strcmp(cpu_vendor, "CyrixInstead") == 0) {
		switch (cpu) {
#ifdef I486_CPU
		case CPU_486DLC:
			need_post_dma_flush = 1;
			break;
		case CPU_M1SC:
			need_pre_dma_flush = 1;
			break;
#endif
		default:
			break;
		}
	} else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
		switch (cpu_id & 0xFF0) {
		case 0x470:		/* Enhanced Am486DX2 WB */
		case 0x490:		/* Enhanced Am486DX4 WB */
		case 0x4F0:		/* Am5x86 WB */
			need_pre_dma_flush = 1;
			break;
		}
	} else if (strcmp(cpu_vendor, "IBM") == 0) {
		need_post_dma_flush = 1;
	} else {
#ifdef CPU_I486_ON_386
		need_pre_dma_flush = 1;
#endif
	}
#endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
}
541
542 #if defined(I586_CPU) && defined(CPU_WT_ALLOC)
/*
 * Enable the write allocate feature of AMD processors.
 * The following functions require the Maxmem variable to be set.
 */
547 void
548 enable_K5_wt_alloc(void)
549 {
550 u_int64_t msr;
551
552 /*
553 * Write allocate is supported only on models 1, 2, and 3, with
554 * a stepping of 4 or greater.
555 */
556 if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
557 disable_intr();
558 msr = rdmsr(0x83); /* HWCR */
559 wrmsr(0x83, msr & !(0x10));
560
561 /*
562 * We have to tell the chip where the top of memory is,
563 * since video cards could have frame bufferes there,
564 * memory-mapped I/O could be there, etc.
565 */
566 if(Maxmem > 0)
567 msr = Maxmem / 16;
568 else
569 msr = 0;
570 msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
571 #ifdef PC98
572 if (!(inb(0x43b) & 4)) {
573 wrmsr(0x86, 0x0ff00f0);
574 msr |= AMD_WT_ALLOC_PRE;
575 }
576 #else
577 /*
578 * There is no way to know wheter 15-16M hole exists or not.
579 * Therefore, we disable write allocate for this range.
580 */
581 wrmsr(0x86, 0x0ff00f0);
582 msr |= AMD_WT_ALLOC_PRE;
583 #endif
584 wrmsr(0x85, msr);
585
586 msr=rdmsr(0x83);
587 wrmsr(0x83, msr|0x10); /* enable write allocate */
588
589 enable_intr();
590 }
591 }
592
/*
 * Enable the write allocate feature of the AMD K6 via the WHCR MSR
 * (0xc0000082).  The memory-size field here is 7 bits wide at bit 1,
 * in 4M units (limit 508M).  Requires Maxmem to be set.
 */
void
enable_K6_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	u_long	eflags;

	eflags = read_eflags();
	disable_intr();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 box becomes unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned with 4M. */
	if (Maxmem > 0)
		size = ((Maxmem >> 8) + 3) >> 2;
	else
		size = 0;

	/* Limit is 508M bytes. */
	if (size > 0x7f)
		size = 0x7f;
	whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);

#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x7fLL << 1)) {
#ifdef PC98
		/*
		 * If bit 2 of port 0x43b is 0, disable write allocate for
		 * the 15-16M range.
		 */
		if (!(inb(0x43b) & 4))
			whcr &= ~0x0001LL;
		else
#endif
			whcr |= 0x0001LL;
	}
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	 */
	whcr &= ~0x0001LL;
#endif
	wrmsr(0x0c0000082, whcr);

	write_eflags(eflags);
	enable_intr();
}
654
655 void
656 enable_K6_2_wt_alloc(void)
657 {
658 quad_t size;
659 u_int64_t whcr;
660 u_long eflags;
661
662 eflags = read_eflags();
663 disable_intr();
664 wbinvd();
665
666 #ifdef CPU_DISABLE_CACHE
667 /*
668 * Certain K6-2 box becomes unstable when write allocation is
669 * enabled.
670 */
671 /*
672 * The AMD-K6 processer provides the 64-bit Test Register 12(TR12),
673 * but only the Cache Inhibit(CI) (bit 3 of TR12) is suppported.
674 * All other bits in TR12 have no effect on the processer's operation.
675 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
676 * on the AMD-K6.
677 */
678 wrmsr(0x0000000e, (u_int64_t)0x0008);
679 #endif
680 /* Don't assume that memory size is aligned with 4M. */
681 if (Maxmem > 0)
682 size = ((Maxmem >> 8) + 3) >> 2;
683 else
684 size = 0;
685
686 /* Limit is 4092M bytes. */
687 if (size > 0x3fff)
688 size = 0x3ff;
689 whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);
690
691 #if defined(PC98) || defined(NO_MEMORY_HOLE)
692 if (whcr & (0x3ffLL << 22)) {
693 #ifdef PC98
694 /*
695 * If bit 2 of port 0x43b is 0, disable wrte allocate for the
696 * 15-16M range.
697 */
698 if (!(inb(0x43b) & 4))
699 whcr &= ~(1LL << 16);
700 else
701 #endif
702 whcr |= 1LL << 16;
703 }
704 #else
705 /*
706 * There is no way to know wheter 15-16M hole exists or not.
707 * Therefore, we disable write allocate for this range.
708 */
709 whcr &= ~(1LL << 16);
710 #endif
711 wrmsr(0x0c0000082, whcr);
712
713 write_eflags(eflags);
714 enable_intr();
715 }
#endif /* I586_CPU && CPU_WT_ALLOC */
717
718 #include "opt_ddb.h"
719 #ifdef DDB
720 #include <ddb/ddb.h>
721
/*
 * DDB `show cyrixreg' command: dump the Cyrix configuration registers
 * (CCR0-CCR5 / PCR0, as applicable to the detected CPU type) plus CR0.
 * Extended registers are read under CCR3.MAPEN0, which is restored
 * before re-enabling interrupts.
 */
DB_SHOW_COMMAND(cyrixreg, cyrixreg)
{
	u_long	eflags;
	u_int	cr0;
	u_char	ccr1, ccr2, ccr3;
	u_char	ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;

	cr0 = rcr0();
	if (strcmp(cpu_vendor, "CyrixInstead") == 0) {
		eflags = read_eflags();
		disable_intr();


		/* CCR0 does not exist on the 5x86 / 486DX variants. */
		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
			ccr0 = read_cyrix_reg(CCR0);
		}
		ccr1 = read_cyrix_reg(CCR1);
		ccr2 = read_cyrix_reg(CCR2);
		ccr3 = read_cyrix_reg(CCR3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			/* Extended registers require CCR3.MAPEN0. */
			write_cyrix_reg(CCR3, CCR3_MAPEN0);
			ccr4 = read_cyrix_reg(CCR4);
			if ((cpu == CPU_M1) || (cpu == CPU_M2))
				ccr5 = read_cyrix_reg(CCR5);
			else
				pcr0 = read_cyrix_reg(PCR0);
			write_cyrix_reg(CCR3, ccr3);	/* Restore CCR3. */
		}
		write_eflags(eflags);

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
			printf("CCR0=%x, ", (u_int)ccr0);

		printf("CCR1=%x, CCR2=%x, CCR3=%x",
			(u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			printf(", CCR4=%x, ", (u_int)ccr4);
			if (cpu == CPU_M1SC)
				printf("PCR0=%x\n", pcr0);
			else
				printf("CCR5=%x\n", ccr5);
		}
	}
	printf("CR0=%x\n", cr0);
}
767 #endif /* DDB */
Cache object: a2c4e49dab9ad05f6fdb57a81e752829
|