/*-
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.2/sys/amd64/amd64/amd64_mem.c 133255 2004-08-07 06:21:37Z scottl $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>

/*
 * amd64 memory range operations
 *
 * This code will probably be impenetrable without reference to the
 * Intel Pentium Pro documentation or the x86-64 programmer's manual,
 * vol. 2.
 */
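
/*
 * In outline: the CPU provides a handful of fixed-range MTRRs covering
 * the low 1MB of physical memory (8 x 64K, 16 x 16K and 64 x 4K
 * ranges) plus a CPU-dependent number of variable-range MTRRs, each
 * describing one power-of-two-sized, size-aligned range.  We shadow
 * all of them in sc->mr_desc and rewrite the hardware wholesale on
 * every change.
 */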

static char *mem_owner_bios = "BIOS";

#define MR686_FIXMTRR (1<<0)    /* fixed-range MTRRs present and enabled */

#define mrwithin(mr, a) \
        (((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
#define mroverlap(mra, mrb) \
        (mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))
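
/*
 * Two ranges overlap iff the one with the higher base starts inside
 * the other, so testing each base against the other range suffices.
 */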

#define mrvalid(base, len) \
        ((!((base) & ((1 << 12) - 1))) &&  /* base is multiple of 4k */ \
         ((len) >= (1 << 12)) &&           /* length is >= 4k */ \
         powerof2((len)) &&                /* ... and power of two */ \
         !((base) & ((len) - 1)))          /* range is not discontinuous */
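
/*
 * For example, mrvalid(0xd0000000, 0x08000000) holds (a 128MB range
 * aligned on a 128MB boundary), while a non-power-of-two length such
 * as 0x0c000000 fails.
 */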

#define mrcopyflags(curr, new) \
        (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))

static int mtrrs_disabled;
TUNABLE_INT("machdep.disable_mtrrs", &mtrrs_disabled);
SYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RDTUN,
    &mtrrs_disabled, 0, "Disable amd64 MTRRs.");

static void amd64_mrinit(struct mem_range_softc *sc);
static int amd64_mrset(struct mem_range_softc *sc,
    struct mem_range_desc *mrd, int *arg);
static void amd64_mrAPinit(struct mem_range_softc *sc);

static struct mem_range_ops amd64_mrops = {
        amd64_mrinit,
        amd64_mrset,
        amd64_mrAPinit
};

/* XXX for AP startup hook */
static u_int64_t mtrrcap, mtrrdef;

static struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
    struct mem_range_desc *mrd);
static void amd64_mrfetch(struct mem_range_softc *sc);
static int amd64_mtrrtype(int flags);
static int amd64_mrt2mtrr(int flags, int oldval);
static int amd64_mtrrconflict(int flag1, int flag2);
static void amd64_mrstore(struct mem_range_softc *sc);
static void amd64_mrstoreone(void *arg);
static struct mem_range_desc *amd64_mtrrfixsearch(struct mem_range_softc *sc,
    u_int64_t addr);
static int amd64_mrsetlow(struct mem_range_softc *sc,
    struct mem_range_desc *mrd, int *arg);
static int amd64_mrsetvariable(struct mem_range_softc *sc,
    struct mem_range_desc *mrd, int *arg);

/* amd64 MTRR type to memory range type conversion */
static int amd64_mtrrtomrt[] = {
        MDF_UNCACHEABLE,        /* 0: UC */
        MDF_WRITECOMBINE,       /* 1: WC */
        MDF_UNKNOWN,            /* 2: reserved encoding */
        MDF_UNKNOWN,            /* 3: reserved encoding */
        MDF_WRITETHROUGH,       /* 4: WT */
        MDF_WRITEPROTECT,       /* 5: WP */
        MDF_WRITEBACK           /* 6: WB */
};

#define MTRRTOMRTLEN (sizeof(amd64_mtrrtomrt) / sizeof(amd64_mtrrtomrt[0]))

/*
 * Used in /dev/mem drivers and elsewhere
 */
MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");

static int
amd64_mtrr2mrt(int val)
{
        if (val < 0 || val >= MTRRTOMRTLEN)
                return (MDF_UNKNOWN);
        return (amd64_mtrrtomrt[val]);
}

/*
 * amd64 MTRR conflicts.  Writeback and uncacheable may overlap.
 */
static int
amd64_mtrrconflict(int flag1, int flag2)
{
        flag1 &= MDF_ATTRMASK;
        flag2 &= MDF_ATTRMASK;
        if ((flag1 & MDF_UNKNOWN) || (flag2 & MDF_UNKNOWN))
                return (1);
        if (flag1 == flag2 ||
            (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
            (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
                return (0);
        return (1);
}

/*
 * Look for an exactly-matching range.
 */
static struct mem_range_desc *
mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
{
        struct mem_range_desc *cand;
        int i;

        for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
                if ((cand->mr_base == mrd->mr_base) &&
                    (cand->mr_len == mrd->mr_len))
                        return (cand);
        return (NULL);
}

/*
 * Fetch the current MTRR settings from the current CPU (assumed to be
 * in sync across all CPUs in the SMP case).  Note that if we are here,
 * we assume that MTRRs are enabled, and we may or may not have fixed
 * MTRRs.
 */
static void
amd64_mrfetch(struct mem_range_softc *sc)
{
        struct mem_range_desc *mrd;
        u_int64_t msrv;
        int i, j, msr;

        mrd = sc->mr_desc;

        /* Get fixed-range MTRRs; each MSR packs eight one-byte type fields */
        if (sc->mr_cap & MR686_FIXMTRR) {
                msr = MSR_MTRR64kBase;
                for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
                        msrv = rdmsr(msr);
                        for (j = 0; j < 8; j++, mrd++) {
                                mrd->mr_flags =
                                    (mrd->mr_flags & ~MDF_ATTRMASK) |
                                    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
                                if (mrd->mr_owner[0] == 0)
                                        strcpy(mrd->mr_owner, mem_owner_bios);
                                msrv = msrv >> 8;
                        }
                }
                msr = MSR_MTRR16kBase;
                for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
                        msrv = rdmsr(msr);
                        for (j = 0; j < 8; j++, mrd++) {
                                mrd->mr_flags =
                                    (mrd->mr_flags & ~MDF_ATTRMASK) |
                                    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
                                if (mrd->mr_owner[0] == 0)
                                        strcpy(mrd->mr_owner, mem_owner_bios);
                                msrv = msrv >> 8;
                        }
                }
                msr = MSR_MTRR4kBase;
                for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
                        msrv = rdmsr(msr);
                        for (j = 0; j < 8; j++, mrd++) {
                                mrd->mr_flags =
                                    (mrd->mr_flags & ~MDF_ATTRMASK) |
                                    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
                                if (mrd->mr_owner[0] == 0)
                                        strcpy(mrd->mr_owner, mem_owner_bios);
                                msrv = msrv >> 8;
                        }
                }
        }

        /* Get remainder which must be variable MTRRs */
        msr = MSR_MTRRVarBase;
        for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
                msrv = rdmsr(msr);
                mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
                    amd64_mtrr2mrt(msrv & 0xff);
                mrd->mr_base = msrv & 0x000000fffffff000L;
                msrv = rdmsr(msr + 1);
                mrd->mr_flags = (msrv & 0x800) ?
                    (mrd->mr_flags | MDF_ACTIVE) :
                    (mrd->mr_flags & ~MDF_ACTIVE);
                /* Compute the range from the mask.  Ick. */
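                /*
                 * For example, a mask of 0x000000fffff00000 gives
                 * ~mask & 0x000000ffffffffff = 0xfffff and hence a
                 * length of 0x100000 (1MB).  Note that a 40-bit
                 * physical address width is hardcoded here.
                 */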
                mrd->mr_len = (~(msrv & 0x000000fffffff000L) &
                    0x000000ffffffffffL) + 1;
                if (!mrvalid(mrd->mr_base, mrd->mr_len))
                        mrd->mr_flags |= MDF_BOGUS;
                /* If unclaimed and active, must be the BIOS */
                if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
                        strcpy(mrd->mr_owner, mem_owner_bios);
        }
}

/*
 * Return the MTRR memory type matching a region's flags
 */
static int
amd64_mtrrtype(int flags)
{
        int i;

        flags &= MDF_ATTRMASK;

        for (i = 0; i < MTRRTOMRTLEN; i++) {
                if (amd64_mtrrtomrt[i] == MDF_UNKNOWN)
                        continue;
                if (flags == amd64_mtrrtomrt[i])
                        return (i);
        }
        return (-1);
}

static int
amd64_mrt2mtrr(int flags, int oldval)
{
        int val;

        if ((val = amd64_mtrrtype(flags)) == -1)
                return (oldval & 0xff);
        return (val & 0xff);
}

/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
amd64_mrstore(struct mem_range_softc *sc)
{
#ifdef SMP
        /*
         * We should use ipi_all_but_self() to call other CPUs into a
         * locking gate, then call a target function to do this work.
         * The "proper" solution involves a generalised locking gate
         * implementation, not ready yet.
         */
        smp_rendezvous(NULL, amd64_mrstoreone, NULL, (void *)sc);
#else
        disable_intr();                         /* disable interrupts */
        amd64_mrstoreone((void *)sc);
        enable_intr();
#endif
}

/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than just
 * stuffing one entry; this is simpler (but slower, of course).
 */
static void
amd64_mrstoreone(void *arg)
{
        struct mem_range_softc *sc = (struct mem_range_softc *)arg;
        struct mem_range_desc *mrd;
        u_int64_t omsrv, msrv;
        int i, j, msr;
        u_long cr4save;                 /* rcr4() returns u_long */

        mrd = sc->mr_desc;

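        /*
         * Follow the MTRR update procedure documented in the vendor
         * manuals: enter no-fill cache mode and flush, disable the
         * MTRRs, rewrite them, flush again, then re-enable the MTRRs
         * and restore the cache and CR4 state.
         */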
        cr4save = rcr4();                       /* save cr4 */
        if (cr4save & CR4_PGE)
                load_cr4(cr4save & ~CR4_PGE);   /* flush global TLB entries */
        load_cr0((rcr0() & ~CR0_NW) | CR0_CD);  /* disable caches (CD = 1, NW = 0) */
        wbinvd();                               /* flush caches, TLBs */
        wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~0x800); /* disable MTRRs (E = 0) */

        /* Set fixed-range MTRRs */
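        /*
         * Each 64-bit fixed-range MSR packs eight one-byte type fields;
         * assemble each register from its eight descriptors, high byte
         * first.
         */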
        if (sc->mr_cap & MR686_FIXMTRR) {
                msr = MSR_MTRR64kBase;
                for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
                        msrv = 0;
                        omsrv = rdmsr(msr);
                        for (j = 7; j >= 0; j--) {
                                msrv = msrv << 8;
                                msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
                                    omsrv >> (j * 8));
                        }
                        wrmsr(msr, msrv);
                        mrd += 8;
                }
                msr = MSR_MTRR16kBase;
                for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
                        msrv = 0;
                        omsrv = rdmsr(msr);
                        for (j = 7; j >= 0; j--) {
                                msrv = msrv << 8;
                                msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
                                    omsrv >> (j * 8));
                        }
                        wrmsr(msr, msrv);
                        mrd += 8;
                }
                msr = MSR_MTRR4kBase;
                for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
                        msrv = 0;
                        omsrv = rdmsr(msr);
                        for (j = 7; j >= 0; j--) {
                                msrv = msrv << 8;
                                msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
                                    omsrv >> (j * 8));
                        }
                        wrmsr(msr, msrv);
                        mrd += 8;
                }
        }

        /* Set remainder which must be variable MTRRs */
        msr = MSR_MTRRVarBase;
        for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
                /* base/type register */
                omsrv = rdmsr(msr);
                if (mrd->mr_flags & MDF_ACTIVE) {
                        msrv = mrd->mr_base & 0x000000fffffff000L;
                        msrv |= amd64_mrt2mtrr(mrd->mr_flags, omsrv);
                } else {
                        msrv = 0;
                }
                wrmsr(msr, msrv);

                /* mask/active register; 0x800 is the valid (V) bit */
                if (mrd->mr_flags & MDF_ACTIVE) {
                        msrv = 0x800 | (~(mrd->mr_len - 1) &
                            0x000000fffffff000L);
                } else {
                        msrv = 0;
                }
                wrmsr(msr + 1, msrv);
        }
        wbinvd();                               /* flush caches, TLBs */
        wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | 0x800); /* restore MTRR state (E = 1) */
        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* enable caches (CD = 0, NW = 0) */
        load_cr4(cr4save);                      /* restore cr4 */
}

/*
 * Hunt for the fixed MTRR referencing (addr)
 */
static struct mem_range_desc *
amd64_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
        struct mem_range_desc *mrd;
        int i;

        for (i = 0, mrd = sc->mr_desc;
            i < (MTRR_N64K + MTRR_N16K + MTRR_N4K); i++, mrd++)
                if ((addr >= mrd->mr_base) &&
                    (addr < (mrd->mr_base + mrd->mr_len)))
                        return (mrd);
        return (NULL);
}

/*
 * Try to satisfy the given range request by manipulating the fixed
 * MTRRs that cover low memory.
 *
 * Note that we try to be generous here; we'll bloat the range out to
 * the next higher/lower boundary to avoid the consumer having to know
 * too much about the mechanisms here.
 *
 * XXX note that this will have to be updated when we start supporting
 * "busy" ranges.
 */
static int
amd64_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd,
    int *arg)
{
        struct mem_range_desc *first_md, *last_md, *curr_md;

        /* range check */
        if (((first_md = amd64_mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
            ((last_md = amd64_mtrrfixsearch(sc,
              mrd->mr_base + mrd->mr_len - 1)) == NULL))
                return (EINVAL);

        /* check we aren't doing something risky */
        if (!(mrd->mr_flags & MDF_FORCE))
                for (curr_md = first_md; curr_md <= last_md; curr_md++) {
                        if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
                                return (EACCES);
                }

        /* set flags, clear set-by-firmware flag */
        for (curr_md = first_md; curr_md <= last_md; curr_md++) {
                curr_md->mr_flags = mrcopyflags(
                    curr_md->mr_flags & ~MDF_FIRMWARE, mrd->mr_flags);
                bcopy(mrd->mr_owner, curr_md->mr_owner,
                    sizeof(mrd->mr_owner));
        }

        return (0);
}

/*
 * Modify/add a variable MTRR to satisfy the request.
 *
 * XXX needs to be updated to properly support "busy" ranges.
 */
static int
amd64_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
    int *arg)
{
        struct mem_range_desc *curr_md, *free_md;
        int i;

        /*
         * Scan the currently active variable descriptors, look for
         * one we exactly match (straight takeover) and for possible
         * accidental overlaps.
         * Keep track of the first empty variable descriptor in case
         * we can't perform a takeover.
         */
        i = (sc->mr_cap & MR686_FIXMTRR) ?
            MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
        curr_md = sc->mr_desc + i;
        free_md = NULL;
        for (; i < sc->mr_ndesc; i++, curr_md++) {
                if (curr_md->mr_flags & MDF_ACTIVE) {
                        /* exact match? */
                        if ((curr_md->mr_base == mrd->mr_base) &&
                            (curr_md->mr_len == mrd->mr_len)) {
                                /* whoops, owned by someone */
                                if (curr_md->mr_flags & MDF_BUSY)
                                        return (EBUSY);
                                /* check we aren't doing something risky */
                                if (!(mrd->mr_flags & MDF_FORCE) &&
                                    ((curr_md->mr_flags & MDF_ATTRMASK) ==
                                    MDF_UNKNOWN))
                                        return (EACCES);
                                /* Ok, just hijack this entry */
                                free_md = curr_md;
                                break;
                        }
                        /* non-exact overlap? */
                        if (mroverlap(curr_md, mrd)) {
                                /* between conflicting region types? */
                                if (amd64_mtrrconflict(curr_md->mr_flags,
                                    mrd->mr_flags))
                                        return (EINVAL);
                        }
                } else if (free_md == NULL) {
                        free_md = curr_md;
                }
        }
        /* got somewhere to put it? */
        if (free_md == NULL)
                return (ENOSPC);

        /* Set up new descriptor */
        free_md->mr_base = mrd->mr_base;
        free_md->mr_len = mrd->mr_len;
        free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
        bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
        return (0);
}

/*
 * Handle requests to set memory range attributes by manipulating MTRRs.
 */
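/*
 * A minimal sketch of how a consumer reaches this code via the
 * machine-independent wrapper declared in <sys/memrange.h> (the base,
 * length and owner name here are illustrative only):
 *
 *      struct mem_range_desc mrd;
 *      int arg, error;
 *
 *      mrd.mr_base = 0xd0000000;
 *      mrd.mr_len = 0x08000000;
 *      mrd.mr_flags = MDF_WRITECOMBINE;
 *      strcpy(mrd.mr_owner, "drm");
 *      arg = MEMRANGE_SET_UPDATE;
 *      error = mem_range_attr_set(&mrd, &arg);
 */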
static int
amd64_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
        struct mem_range_desc *targ;
        int error = 0;

        switch (*arg) {
        case MEMRANGE_SET_UPDATE:
                /* make sure that what's being asked for is even possible at all */
                if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
                    amd64_mtrrtype(mrd->mr_flags) == -1)
                        return (EINVAL);

#define FIXTOP \
        ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
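/* FIXTOP works out to 0x100000: the fixed MTRRs cover exactly the low 1MB. */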

                /* are the "low memory" conditions applicable? */
                if ((sc->mr_cap & MR686_FIXMTRR) &&
                    ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
                        if ((error = amd64_mrsetlow(sc, mrd, arg)) != 0)
                                return (error);
                } else {
                        /* it's time to play with variable MTRRs */
                        if ((error = amd64_mrsetvariable(sc, mrd, arg)) != 0)
                                return (error);
                }
                break;

        case MEMRANGE_SET_REMOVE:
                if ((targ = mem_range_match(sc, mrd)) == NULL)
                        return (ENOENT);
                if (targ->mr_flags & MDF_FIXACTIVE)
                        return (EPERM);
                if (targ->mr_flags & MDF_BUSY)
                        return (EBUSY);
                targ->mr_flags &= ~MDF_ACTIVE;
                targ->mr_owner[0] = 0;
                break;

        default:
                return (EOPNOTSUPP);
        }

        /* update the hardware */
        amd64_mrstore(sc);
        amd64_mrfetch(sc);              /* refetch to see where we're at */
        return (0);
}

/*
 * Work out how many ranges we support, initialise storage for them,
 * and fetch the initial settings.
 */
static void
amd64_mrinit(struct mem_range_softc *sc)
{
        struct mem_range_desc *mrd;
        int nmdesc = 0;
        int i;

        mtrrcap = rdmsr(MSR_MTRRcap);
        mtrrdef = rdmsr(MSR_MTRRdefType);

        /* For now, bail out if MTRRs are not enabled (MTRRdefType.E clear) */
        if (!(mtrrdef & 0x800)) {
                if (bootverbose)
                        printf("CPU supports MTRRs but they are not enabled\n");
                return;
        }
        nmdesc = mtrrcap & 0xff;        /* number of variable ranges (VCNT) */

        /* If fixed MTRRs are supported (MTRRcap.FIX) and enabled (MTRRdefType.FE) */
        if ((mtrrcap & 0x100) && (mtrrdef & 0x400)) {
                sc->mr_cap = MR686_FIXMTRR;
                nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
        }

        sc->mr_desc = (struct mem_range_desc *)malloc(nmdesc *
            sizeof(struct mem_range_desc), M_MEMDESC, M_WAITOK | M_ZERO);
        sc->mr_ndesc = nmdesc;

        mrd = sc->mr_desc;

        /* Populate the fixed MTRR entries' base/length */
        if (sc->mr_cap & MR686_FIXMTRR) {
                for (i = 0; i < MTRR_N64K; i++, mrd++) {
                        mrd->mr_base = i * 0x10000;
                        mrd->mr_len = 0x10000;
                        mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
                            MDF_FIXACTIVE;
                }
                for (i = 0; i < MTRR_N16K; i++, mrd++) {
                        mrd->mr_base = i * 0x4000 + 0x80000;
                        mrd->mr_len = 0x4000;
                        mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
                            MDF_FIXACTIVE;
                }
                for (i = 0; i < MTRR_N4K; i++, mrd++) {
                        mrd->mr_base = i * 0x1000 + 0xc0000;
                        mrd->mr_len = 0x1000;
                        mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
                            MDF_FIXACTIVE;
                }
        }

        /*
         * Get current settings; anything set now is considered to have
         * been set by the firmware.  (XXX has something already played
         * here?)
         */
        amd64_mrfetch(sc);
        mrd = sc->mr_desc;
        for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
                if (mrd->mr_flags & MDF_ACTIVE)
                        mrd->mr_flags |= MDF_FIRMWARE;
        }
}

/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
 */
static void
amd64_mrAPinit(struct mem_range_softc *sc)
{
        amd64_mrstoreone((void *)sc);   /* set MTRRs to match BSP */
        wrmsr(MSR_MTRRdefType, mtrrdef); /* set MTRR behaviour to match BSP */
}

static void
amd64_mem_drvinit(void *unused)
{
        if (mtrrs_disabled)
                return;
        if (!(cpu_feature & CPUID_MTRR))
                return;
        /* Only CPU families 6 (0x600) and 15 (0xf00) are known to work */
        if ((cpu_id & 0xf00) != 0x600 && (cpu_id & 0xf00) != 0xf00)
                return;
        if ((strcmp(cpu_vendor, "GenuineIntel") != 0) &&
            (strcmp(cpu_vendor, "AuthenticAMD") != 0))
                return;
        mem_range_softc.mr_op = &amd64_mrops;
}

SYSINIT(amd64memdev, SI_SUB_DRIVERS, SI_ORDER_FIRST, amd64_mem_drvinit, NULL)