/*-
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>

/*
 * amd64 memory range operations
 *
 * This code will probably be impenetrable without reference to the
 * Intel Pentium Pro documentation or the AMD64 Architecture
 * Programmer's Manual, Volume 2 (System Programming).
 */

static char *mem_owner_bios = "BIOS";

#define	MR686_FIXMTRR	(1<<0)

#define	mrwithin(mr, a)							\
	(((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
#define	mroverlap(mra, mrb)						\
	(mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))

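/*
 * A variable-range MTRR must be 4K-aligned, at least 4K long, a power of
 * two in size, and naturally aligned (the base a multiple of the length).
 * E.g. base 0xd0000000/len 0x08000000 is valid, while base 0x1000/len
 * 0x2000 is not, because the base is not aligned to the length.
 */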
#define	mrvalid(base, len)						\
	((!(base & ((1 << 12) - 1))) &&	/* base is multiple of 4k */	\
	    ((len) >= (1 << 12)) &&	/* length is >= 4k */		\
	    powerof2((len)) &&		/* ... and power of two */	\
	    !((base) & ((len) - 1)))	/* range is not discontinuous */

#define	mrcopyflags(curr, new)						\
	(((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))

static int mtrrs_disabled;
TUNABLE_INT("machdep.disable_mtrrs", &mtrrs_disabled);
SYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RDTUN,
    &mtrrs_disabled, 0, "Disable amd64 MTRRs.");

static void	amd64_mrinit(struct mem_range_softc *sc);
static int	amd64_mrset(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);
static void	amd64_mrAPinit(struct mem_range_softc *sc);

static struct mem_range_ops amd64_mrops = {
	amd64_mrinit,
	amd64_mrset,
	amd64_mrAPinit
};

/* XXX for AP startup hook */
static u_int64_t mtrrcap, mtrrdef;

/* The bitmask for the PhysBase and PhysMask fields of the variable MTRRs. */
static u_int64_t mtrr_physmask;

static struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd);
static void	amd64_mrfetch(struct mem_range_softc *sc);
static int	amd64_mtrrtype(int flags);
static int	amd64_mrt2mtrr(int flags, int oldval);
static int	amd64_mtrrconflict(int flag1, int flag2);
static void	amd64_mrstore(struct mem_range_softc *sc);
static void	amd64_mrstoreone(void *arg);
static struct mem_range_desc *amd64_mtrrfixsearch(struct mem_range_softc *sc,
		    u_int64_t addr);
static int	amd64_mrsetlow(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);
static int	amd64_mrsetvariable(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);

/* amd64 MTRR type to memory range type conversion */
static int amd64_mtrrtomrt[] = {
	MDF_UNCACHEABLE,
	MDF_WRITECOMBINE,
	MDF_UNKNOWN,
	MDF_UNKNOWN,
	MDF_WRITETHROUGH,
	MDF_WRITEPROTECT,
	MDF_WRITEBACK
};

#define	MTRRTOMRTLEN (sizeof(amd64_mtrrtomrt) / sizeof(amd64_mtrrtomrt[0]))

static int
amd64_mtrr2mrt(int val)
{

	if (val < 0 || val >= MTRRTOMRTLEN)
		return (MDF_UNKNOWN);
	return (amd64_mtrrtomrt[val]);
}

/*
 * amd64 MTRR conflicts.  Writeback and uncacheable may overlap.
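 * When such an overlap occurs, the CPU resolves it by treating the
 * overlapping region as uncacheable, so that combination is harmless.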
 */
static int
amd64_mtrrconflict(int flag1, int flag2)
{

	flag1 &= MDF_ATTRMASK;
	flag2 &= MDF_ATTRMASK;
	if ((flag1 & MDF_UNKNOWN) || (flag2 & MDF_UNKNOWN))
		return (1);
	if (flag1 == flag2 ||
	    (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
	    (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
		return (0);
	return (1);
}

/*
 * Look for an exactly-matching range.
 */
static struct mem_range_desc *
mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
{
	struct mem_range_desc *cand;
	int i;

	for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
		if ((cand->mr_base == mrd->mr_base) &&
		    (cand->mr_len == mrd->mr_len))
			return (cand);
	return (NULL);
}

/*
 * Fetch the current mtrr settings from the current CPU (assumed to
 * all be in sync in the SMP case).  Note that if we are here, we
 * assume that MTRRs are enabled, and we may or may not have fixed
 * MTRRs.
 */
static void
amd64_mrfetch(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	u_int64_t msrv;
	int i, j, msr;

	mrd = sc->mr_desc;

	/* Get fixed-range MTRRs. */
	if (sc->mr_cap & MR686_FIXMTRR) {
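		/*
		 * Each fixed-range MSR packs eight one-byte type fields,
		 * so every rdmsr() below yields the attributes for eight
		 * consecutive descriptors.
		 */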
		msr = MSR_MTRR64kBase;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
	}

	/* Get remainder which must be variable MTRRs. */
	msr = MSR_MTRRVarBase;
	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
		msrv = rdmsr(msr);
		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
		    amd64_mtrr2mrt(msrv & MTRR_PHYSBASE_TYPE);
		mrd->mr_base = msrv & mtrr_physmask;
		msrv = rdmsr(msr + 1);
		mrd->mr_flags = (msrv & MTRR_PHYSMASK_VALID) ?
		    (mrd->mr_flags | MDF_ACTIVE) :
		    (mrd->mr_flags & ~MDF_ACTIVE);

		/* Compute the range from the mask.  Ick. */
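		/*
		 * E.g. with 36 physical address bits, a 1MB range has a
		 * PhysMask of 0xffff00000; inverting it within the address
		 * field gives 0xfffff, and adding one recovers the length
		 * 0x100000.
		 */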
		mrd->mr_len = (~(msrv & mtrr_physmask) &
		    (mtrr_physmask | 0xfffL)) + 1;
		if (!mrvalid(mrd->mr_base, mrd->mr_len))
			mrd->mr_flags |= MDF_BOGUS;

		/* If unclaimed and active, must be the BIOS. */
		if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
			strcpy(mrd->mr_owner, mem_owner_bios);
	}
}

/*
 * Return the MTRR memory type matching a region's flags
 */
static int
amd64_mtrrtype(int flags)
{
	int i;

	flags &= MDF_ATTRMASK;

	for (i = 0; i < MTRRTOMRTLEN; i++) {
		if (amd64_mtrrtomrt[i] == MDF_UNKNOWN)
			continue;
		if (flags == amd64_mtrrtomrt[i])
			return (i);
	}
	return (-1);
}

static int
amd64_mrt2mtrr(int flags, int oldval)
{
	int val;

	if ((val = amd64_mtrrtype(flags)) == -1)
		return (oldval & 0xff);
	return (val & 0xff);
}

/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
amd64_mrstore(struct mem_range_softc *sc)
{
#ifdef SMP
	/*
	 * We should use ipi_all_but_self() to call other CPUs into a
	 * locking gate, then call a target function to do this work.
	 * The "proper" solution involves a generalised locking gate
	 * implementation, not ready yet.
	 */
	smp_rendezvous(NULL, amd64_mrstoreone, NULL, sc);
#else
	disable_intr();				/* disable interrupts */
	amd64_mrstoreone(sc);
	enable_intr();
#endif
}

/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than just
 * stuffing one entry; this is simpler (but slower, of course).
 */
static void
amd64_mrstoreone(void *arg)
{
	struct mem_range_softc *sc = arg;
	struct mem_range_desc *mrd;
	u_int64_t omsrv, msrv;
	int i, j, msr;
	u_int cr4save;

	mrd = sc->mr_desc;

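	/*
	 * The sequence below follows the MTRR update recipe from the
	 * Intel documentation: disable caching and flush, disable the
	 * MTRRs, rewrite them, flush again, then re-enable everything.
	 */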
	/* Disable PGE. */
	cr4save = rcr4();
	if (cr4save & CR4_PGE)
		load_cr4(cr4save & ~CR4_PGE);

	/* Disable caches (CD = 1, NW = 0). */
	load_cr0((rcr0() & ~CR0_NW) | CR0_CD);

	/* Flushes caches and TLBs. */
	wbinvd();

	/* Disable MTRRs (E = 0). */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_DEF_ENABLE);

	/* Set fixed-range MTRRs. */
	if (sc->mr_cap & MR686_FIXMTRR) {
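		/*
		 * Rebuild each fixed-range MSR eight type bytes at a time,
		 * working from the top byte down; flags with no MTRR
		 * equivalent keep their previous hardware value, courtesy
		 * of amd64_mrt2mtrr().
		 */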
		msr = MSR_MTRR64kBase;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
	}

	/* Set remainder which must be variable MTRRs. */
	msr = MSR_MTRRVarBase;
	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
		/* base/type register */
		omsrv = rdmsr(msr);
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = mrd->mr_base & mtrr_physmask;
			msrv |= amd64_mrt2mtrr(mrd->mr_flags, omsrv);
		} else {
			msrv = 0;
		}
		wrmsr(msr, msrv);

		/* mask/active register */
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = MTRR_PHYSMASK_VALID |
			    (~(mrd->mr_len - 1) & mtrr_physmask);
		} else {
			msrv = 0;
		}
		wrmsr(msr + 1, msrv);
	}

	/* Flush caches, TLBs. */
	wbinvd();

	/* Enable MTRRs. */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_DEF_ENABLE);

	/* Enable caches (CD = 0, NW = 0). */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));

	/* Restore PGE. */
	load_cr4(cr4save);
}

/*
 * Hunt for the fixed MTRR referencing (addr)
 */
static struct mem_range_desc *
amd64_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
	struct mem_range_desc *mrd;
	int i;

	for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K);
	    i++, mrd++)
		if ((addr >= mrd->mr_base) &&
		    (addr < (mrd->mr_base + mrd->mr_len)))
			return (mrd);
	return (NULL);
}

/*
 * Try to satisfy the given range request by manipulating the fixed
 * MTRRs that cover low memory.
 *
 * Note that we try to be generous here; we'll bloat the range out to
 * the next higher/lower boundary to avoid the consumer having to know
 * too much about the mechanisms here.
 *
 * XXX note that this will have to be updated when we start supporting
 * "busy" ranges.
 */
static int
amd64_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *first_md, *last_md, *curr_md;

	/* Range check. */
	if (((first_md = amd64_mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
	    ((last_md = amd64_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL))
		return (EINVAL);

	/* Check that we aren't doing something risky. */
	if (!(mrd->mr_flags & MDF_FORCE))
		for (curr_md = first_md; curr_md <= last_md; curr_md++) {
			if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
				return (EACCES);
		}

	/* Set flags, clear set-by-firmware flag. */
	for (curr_md = first_md; curr_md <= last_md; curr_md++) {
		curr_md->mr_flags = mrcopyflags(curr_md->mr_flags &
		    ~MDF_FIRMWARE, mrd->mr_flags);
		bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
	}

	return (0);
}

/*
 * Modify/add a variable MTRR to satisfy the request.
 *
 * XXX needs to be updated to properly support "busy" ranges.
 */
static int
amd64_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
    int *arg)
{
	struct mem_range_desc *curr_md, *free_md;
	int i;

	/*
	 * Scan the currently active variable descriptors, look for
	 * one we exactly match (straight takeover) and for possible
	 * accidental overlaps.
	 *
	 * Keep track of the first empty variable descriptor in case
	 * we can't perform a takeover.
	 */
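	/* Variable-range descriptors follow any fixed ones in the array. */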
	i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	curr_md = sc->mr_desc + i;
	free_md = NULL;
	for (; i < sc->mr_ndesc; i++, curr_md++) {
		if (curr_md->mr_flags & MDF_ACTIVE) {
			/* Exact match? */
			if ((curr_md->mr_base == mrd->mr_base) &&
			    (curr_md->mr_len == mrd->mr_len)) {

				/* Whoops, owned by someone. */
				if (curr_md->mr_flags & MDF_BUSY)
					return (EBUSY);

				/* Check that we aren't doing something risky */
				if (!(mrd->mr_flags & MDF_FORCE) &&
				    ((curr_md->mr_flags & MDF_ATTRMASK) ==
				    MDF_UNKNOWN))
					return (EACCES);

				/* Ok, just hijack this entry. */
				free_md = curr_md;
				break;
			}

			/* Non-exact overlap? */
			if (mroverlap(curr_md, mrd)) {
				/* Between conflicting region types? */
				if (amd64_mtrrconflict(curr_md->mr_flags,
				    mrd->mr_flags))
					return (EINVAL);
			}
		} else if (free_md == NULL) {
			free_md = curr_md;
		}
	}

	/* Got somewhere to put it? */
	if (free_md == NULL)
		return (ENOSPC);

	/* Set up new descriptor. */
	free_md->mr_base = mrd->mr_base;
	free_md->mr_len = mrd->mr_len;
	free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
	bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
	return (0);
}

/*
 * Handle requests to set memory range attributes by manipulating MTRRs.
 */
static int
amd64_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *targ;
	int error = 0;

	switch (*arg) {
	case MEMRANGE_SET_UPDATE:
		/*
		 * Make sure that what's being asked for is even
		 * possible at all.
		 */
		if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
		    amd64_mtrrtype(mrd->mr_flags) == -1)
			return (EINVAL);

#define	FIXTOP	((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
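/* With the standard fixed-range counts, FIXTOP evaluates to 1MB. */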

		/* Are the "low memory" conditions applicable? */
		if ((sc->mr_cap & MR686_FIXMTRR) &&
		    ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
			if ((error = amd64_mrsetlow(sc, mrd, arg)) != 0)
				return (error);
		} else {
			/* It's time to play with variable MTRRs. */
			if ((error = amd64_mrsetvariable(sc, mrd, arg)) != 0)
				return (error);
		}
		break;

	case MEMRANGE_SET_REMOVE:
		if ((targ = mem_range_match(sc, mrd)) == NULL)
			return (ENOENT);
		if (targ->mr_flags & MDF_FIXACTIVE)
			return (EPERM);
		if (targ->mr_flags & MDF_BUSY)
			return (EBUSY);
		targ->mr_flags &= ~MDF_ACTIVE;
		targ->mr_owner[0] = 0;
		break;

	default:
		return (EOPNOTSUPP);
	}

	/* Update the hardware. */
	amd64_mrstore(sc);

	/* Refetch to see where we're at. */
	amd64_mrfetch(sc);
	return (0);
}

/*
 * Work out how many ranges we support, initialise storage for them,
 * and fetch the initial settings.
 */
static void
amd64_mrinit(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	u_int regs[4];
	int i, nmdesc = 0, pabits;

	mtrrcap = rdmsr(MSR_MTRRcap);
	mtrrdef = rdmsr(MSR_MTRRdefType);

	/* For now, bail out if MTRRs are not enabled. */
	if (!(mtrrdef & MTRR_DEF_ENABLE)) {
		if (bootverbose)
			printf("CPU supports MTRRs but they are not enabled\n");
		return;
	}
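	/* The low bits of MTRRcap report the number of variable MTRRs. */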
	nmdesc = mtrrcap & MTRR_CAP_VCNT;

	/*
	 * Determine the size of the PhysMask and PhysBase fields in
	 * the variable range MTRRs.  If the extended CPUID 0x80000008
	 * is present, use that to figure out how many physical
	 * address bits the CPU supports.  Otherwise, default to 36
	 * address bits.
	 */
	if (cpu_exthigh >= 0x80000008) {
		do_cpuid(0x80000008, regs);
		pabits = regs[0] & 0xff;
	} else
		pabits = 36;
	mtrr_physmask = ((1UL << pabits) - 1) & ~0xfffUL;
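	/* E.g. pabits = 40 yields mtrr_physmask = 0xfffffff000. */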

	/* If fixed MTRRs supported and enabled. */
	if ((mtrrcap & MTRR_CAP_FIXED) && (mtrrdef & MTRR_DEF_FIXED_ENABLE)) {
		sc->mr_cap = MR686_FIXMTRR;
		nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
	}

	sc->mr_desc = malloc(nmdesc * sizeof(struct mem_range_desc), M_MEMDESC,
	    M_WAITOK | M_ZERO);
	sc->mr_ndesc = nmdesc;

	mrd = sc->mr_desc;

	/* Populate the fixed MTRR entries' base/length. */
	if (sc->mr_cap & MR686_FIXMTRR) {
		for (i = 0; i < MTRR_N64K; i++, mrd++) {
			mrd->mr_base = i * 0x10000;
			mrd->mr_len = 0x10000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N16K; i++, mrd++) {
			mrd->mr_base = i * 0x4000 + 0x80000;
			mrd->mr_len = 0x4000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N4K; i++, mrd++) {
			mrd->mr_base = i * 0x1000 + 0xc0000;
			mrd->mr_len = 0x1000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
	}

	/*
	 * Get current settings, anything set now is considered to
	 * have been set by the firmware.  (XXX has something already
	 * played here?)
	 */
	amd64_mrfetch(sc);
	mrd = sc->mr_desc;
	for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
		if (mrd->mr_flags & MDF_ACTIVE)
			mrd->mr_flags |= MDF_FIRMWARE;
	}
}

/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
 */
static void
amd64_mrAPinit(struct mem_range_softc *sc)
{

	amd64_mrstoreone(sc);
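	/* Restore the default-type register value saved by the BSP. */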
	wrmsr(MSR_MTRRdefType, mtrrdef);
}

static void
amd64_mem_drvinit(void *unused)
{

	if (mtrrs_disabled)
		return;
	if (!(cpu_feature & CPUID_MTRR))
		return;
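	/* Only 686-class (family 6) and family 0xf CPUs are handled. */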
	if ((cpu_id & 0xf00) != 0x600 && (cpu_id & 0xf00) != 0xf00)
		return;
	if ((strcmp(cpu_vendor, "GenuineIntel") != 0) &&
	    (strcmp(cpu_vendor, "AuthenticAMD") != 0))
		return;
	mem_range_softc.mr_op = &amd64_mrops;
}
SYSINIT(amd64memdev, SI_SUB_DRIVERS, SI_ORDER_FIRST, amd64_mem_drvinit, NULL);