/*-
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/7.4/sys/amd64/amd64/amd64_mem.c 195686 2009-07-14 17:37:59Z jkim $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

/*
 * amd64 memory range operations
 *
 * This code will probably be impenetrable without reference to the
 * Intel Pentium Pro documentation or the AMD64 Architecture
 * Programmer's Manual, Volume 2.
 */
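
/*
 * Background for what follows: each variable-range MTRR is a pair of
 * MSRs, a PhysBase register holding the range's base address and memory
 * type, and a PhysMask register holding a mask over the CPU's physical
 * address bits plus a "valid" bit.  The fixed-range MTRRs, when present,
 * describe the low 1MB of memory at 64K/16K/4K granularity, packing
 * eight one-byte type fields into each MSR.
 */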

static char *mem_owner_bios = "BIOS";

#define MR686_FIXMTRR (1<<0)

#define mrwithin(mr, a) \
        (((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
#define mroverlap(mra, mrb) \
        (mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))

#define mrvalid(base, len) \
        ((!((base) & ((1 << 12) - 1))) && /* base is a multiple of 4K */ \
            ((len) >= (1 << 12)) && /* length is >= 4K */ \
            powerof2((len)) && /* ... and a power of two */ \
            !((base) & ((len) - 1))) /* base is aligned on a len boundary */
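
/*
 * For example, base 0x40000000 with len 0x08000000 (128MB) is valid:
 * both are 4K-aligned, the length is a power of two, and the base is
 * aligned on a length boundary.  A len of 0x0c000000 would fail the
 * power-of-two test, since the MTRR PhysMask encoding can only
 * describe naturally-aligned power-of-two sized ranges.
 */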

#define mrcopyflags(curr, new) \
        (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))

static int mtrrs_disabled;
TUNABLE_INT("machdep.disable_mtrrs", &mtrrs_disabled);
SYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RDTUN,
    &mtrrs_disabled, 0, "Disable amd64 MTRRs.");

static void amd64_mrinit(struct mem_range_softc *sc);
static int amd64_mrset(struct mem_range_softc *sc,
    struct mem_range_desc *mrd, int *arg);
static void amd64_mrAPinit(struct mem_range_softc *sc);

static struct mem_range_ops amd64_mrops = {
        amd64_mrinit,
        amd64_mrset,
        amd64_mrAPinit
};

/* XXX for AP startup hook */
static u_int64_t mtrrcap, mtrrdef;

/* The bitmask for the PhysBase and PhysMask fields of the variable MTRRs. */
static u_int64_t mtrr_physmask;

static struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
    struct mem_range_desc *mrd);
static void amd64_mrfetch(struct mem_range_softc *sc);
static int amd64_mtrrtype(int flags);
static int amd64_mrt2mtrr(int flags, int oldval);
static int amd64_mtrrconflict(int flag1, int flag2);
static void amd64_mrstore(struct mem_range_softc *sc);
static void amd64_mrstoreone(void *arg);
static struct mem_range_desc *amd64_mtrrfixsearch(struct mem_range_softc *sc,
    u_int64_t addr);
static int amd64_mrsetlow(struct mem_range_softc *sc,
    struct mem_range_desc *mrd, int *arg);
static int amd64_mrsetvariable(struct mem_range_softc *sc,
    struct mem_range_desc *mrd, int *arg);

/* amd64 MTRR type to memory range type conversion */
static int amd64_mtrrtomrt[] = {
        MDF_UNCACHEABLE,
        MDF_WRITECOMBINE,
        MDF_UNKNOWN,
        MDF_UNKNOWN,
        MDF_WRITETHROUGH,
        MDF_WRITEPROTECT,
        MDF_WRITEBACK
};
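
/*
 * Indices correspond to the architectural MTRR type encodings:
 * 0 = UC (uncacheable), 1 = WC (write-combining), 4 = WT (write-through),
 * 5 = WP (write-protect) and 6 = WB (write-back); encodings 2 and 3 are
 * reserved and map to MDF_UNKNOWN.
 */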

#define MTRRTOMRTLEN (sizeof(amd64_mtrrtomrt) / sizeof(amd64_mtrrtomrt[0]))

static int
amd64_mtrr2mrt(int val)
{

        if (val < 0 || val >= MTRRTOMRTLEN)
                return (MDF_UNKNOWN);
        return (amd64_mtrrtomrt[val]);
}

/*
 * amd64 MTRR conflicts.  Writeback and uncacheable may overlap;
 * identical types trivially do not conflict.
 */
static int
amd64_mtrrconflict(int flag1, int flag2)
{

        flag1 &= MDF_ATTRMASK;
        flag2 &= MDF_ATTRMASK;
        if ((flag1 & MDF_UNKNOWN) || (flag2 & MDF_UNKNOWN))
                return (1);
        if (flag1 == flag2 ||
            (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
            (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
                return (0);
        return (1);
}

/*
 * Look for an exactly-matching range.
 */
static struct mem_range_desc *
mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
{
        struct mem_range_desc *cand;
        int i;

        for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
                if ((cand->mr_base == mrd->mr_base) &&
                    (cand->mr_len == mrd->mr_len))
                        return (cand);
        return (NULL);
}

/*
 * Fetch the current MTRR settings from the current CPU (assumed to be
 * in sync across all CPUs in the SMP case).  Note that if we are here,
 * we assume that MTRRs are enabled, and we may or may not have fixed
 * MTRRs.
 */
static void
amd64_mrfetch(struct mem_range_softc *sc)
{
        struct mem_range_desc *mrd;
        u_int64_t msrv;
        int i, j, msr;

        mrd = sc->mr_desc;

        /* Get fixed-range MTRRs. */
        if (sc->mr_cap & MR686_FIXMTRR) {
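                /*
                 * Each fixed-range MSR covers eight consecutive ranges,
                 * one type byte per range, with the lowest address in
                 * the least significant byte; hence each walk below
                 * consumes a byte and shifts the MSR value right by 8.
                 */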
                msr = MSR_MTRR64kBase;
                for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
                        msrv = rdmsr(msr);
                        for (j = 0; j < 8; j++, mrd++) {
                                mrd->mr_flags =
                                    (mrd->mr_flags & ~MDF_ATTRMASK) |
                                    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
                                if (mrd->mr_owner[0] == 0)
                                        strcpy(mrd->mr_owner, mem_owner_bios);
                                msrv = msrv >> 8;
                        }
                }
                msr = MSR_MTRR16kBase;
                for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
                        msrv = rdmsr(msr);
                        for (j = 0; j < 8; j++, mrd++) {
                                mrd->mr_flags =
                                    (mrd->mr_flags & ~MDF_ATTRMASK) |
                                    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
                                if (mrd->mr_owner[0] == 0)
                                        strcpy(mrd->mr_owner, mem_owner_bios);
                                msrv = msrv >> 8;
                        }
                }
                msr = MSR_MTRR4kBase;
                for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
                        msrv = rdmsr(msr);
                        for (j = 0; j < 8; j++, mrd++) {
                                mrd->mr_flags =
                                    (mrd->mr_flags & ~MDF_ATTRMASK) |
                                    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
                                if (mrd->mr_owner[0] == 0)
                                        strcpy(mrd->mr_owner, mem_owner_bios);
                                msrv = msrv >> 8;
                        }
                }
        }

        /* Get the remainder, which must be variable MTRRs. */
        msr = MSR_MTRRVarBase;
        for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
                msrv = rdmsr(msr);
                mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
                    amd64_mtrr2mrt(msrv & MTRR_PHYSBASE_TYPE);
                mrd->mr_base = msrv & mtrr_physmask;
                msrv = rdmsr(msr + 1);
                mrd->mr_flags = (msrv & MTRR_PHYSMASK_VALID) ?
                    (mrd->mr_flags | MDF_ACTIVE) :
                    (mrd->mr_flags & ~MDF_ACTIVE);

                /* Compute the range from the mask. Ick. */
                mrd->mr_len = (~(msrv & mtrr_physmask) &
                    (mtrr_physmask | 0xfffL)) + 1;
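                /*
                 * For instance, with 36 physical address bits a 64MB
                 * range has PhysMask 0xffc000000; inverting it within
                 * the supported address bits plus the low 12 bits
                 * gives 0x3ffffff, and adding 1 recovers the length
                 * 0x4000000.
                 */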
                if (!mrvalid(mrd->mr_base, mrd->mr_len))
                        mrd->mr_flags |= MDF_BOGUS;

                /* If unclaimed and active, must be the BIOS. */
                if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
                        strcpy(mrd->mr_owner, mem_owner_bios);
        }
}

/*
 * Return the MTRR memory type matching a region's flags, or -1 if
 * there is no matching type.
 */
static int
amd64_mtrrtype(int flags)
{
        int i;

        flags &= MDF_ATTRMASK;

        for (i = 0; i < MTRRTOMRTLEN; i++) {
                if (amd64_mtrrtomrt[i] == MDF_UNKNOWN)
                        continue;
                if (flags == amd64_mtrrtomrt[i])
                        return (i);
        }
        return (-1);
}

static int
amd64_mrt2mtrr(int flags, int oldval)
{
        int val;

        if ((val = amd64_mtrrtype(flags)) == -1)
                return (oldval & 0xff);
        return (val & 0xff);
}

/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
amd64_mrstore(struct mem_range_softc *sc)
{
#ifdef SMP
        /*
         * We should use ipi_all_but_self() to call other CPUs into a
         * locking gate, then call a target function to do this work.
         * The "proper" solution involves a generalised locking gate
         * implementation, not ready yet.
         */
        smp_rendezvous(NULL, amd64_mrstoreone, NULL, sc);
#else
        disable_intr();                         /* disable interrupts */
        amd64_mrstoreone(sc);
        enable_intr();
#endif
}

/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than just
 * stuffing one entry; this is simpler (but slower, of course).
 */
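/*
 * The sequence below (disable PGE, disable caching, wbinvd, disable
 * MTRRs, rewrite the MSRs, wbinvd again, then re-enable everything)
 * follows the MTRR update procedure documented in the Intel and AMD
 * architecture manuals.
 */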
static void
amd64_mrstoreone(void *arg)
{
        struct mem_range_softc *sc = arg;
        struct mem_range_desc *mrd;
        u_int64_t omsrv, msrv;
        int i, j, msr;
        u_int cr4save;

        mrd = sc->mr_desc;

        /* Disable PGE. */
        cr4save = rcr4();
        if (cr4save & CR4_PGE)
                load_cr4(cr4save & ~CR4_PGE);

        /* Disable caches (CD = 1, NW = 0). */
        load_cr0((rcr0() & ~CR0_NW) | CR0_CD);

        /* Flushes caches and TLBs. */
        wbinvd();

        /* Disable MTRRs (E = 0). */
        wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_DEF_ENABLE);

        /* Set fixed-range MTRRs. */
        if (sc->mr_cap & MR686_FIXMTRR) {
                msr = MSR_MTRR64kBase;
                for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
                        msrv = 0;
                        omsrv = rdmsr(msr);
                        for (j = 7; j >= 0; j--) {
                                msrv = msrv << 8;
                                msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
                                    omsrv >> (j * 8));
                        }
                        wrmsr(msr, msrv);
                        mrd += 8;
                }
                msr = MSR_MTRR16kBase;
                for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
                        msrv = 0;
                        omsrv = rdmsr(msr);
                        for (j = 7; j >= 0; j--) {
                                msrv = msrv << 8;
                                msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
                                    omsrv >> (j * 8));
                        }
                        wrmsr(msr, msrv);
                        mrd += 8;
                }
                msr = MSR_MTRR4kBase;
                for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
                        msrv = 0;
                        omsrv = rdmsr(msr);
                        for (j = 7; j >= 0; j--) {
                                msrv = msrv << 8;
                                msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
                                    omsrv >> (j * 8));
                        }
                        wrmsr(msr, msrv);
                        mrd += 8;
                }
        }

        /* Set the remainder, which must be variable MTRRs. */
        msr = MSR_MTRRVarBase;
        for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
                /* base/type register */
                omsrv = rdmsr(msr);
                if (mrd->mr_flags & MDF_ACTIVE) {
                        msrv = mrd->mr_base & mtrr_physmask;
                        msrv |= amd64_mrt2mtrr(mrd->mr_flags, omsrv);
                } else {
                        msrv = 0;
                }
                wrmsr(msr, msrv);

                /* mask/active register */
                if (mrd->mr_flags & MDF_ACTIVE) {
                        msrv = MTRR_PHYSMASK_VALID |
                            (~(mrd->mr_len - 1) & mtrr_physmask);
                } else {
                        msrv = 0;
                }
                wrmsr(msr + 1, msrv);
        }

        /* Flush caches, TLBs. */
        wbinvd();

        /* Enable MTRRs. */
        wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_DEF_ENABLE);

        /* Enable caches (CD = 0, NW = 0). */
        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));

        /* Restore PGE. */
        load_cr4(cr4save);
}

/*
 * Hunt for the fixed MTRR referencing (addr).
 */
static struct mem_range_desc *
amd64_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
        struct mem_range_desc *mrd;
        int i;

        for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K);
            i++, mrd++)
                if ((addr >= mrd->mr_base) &&
                    (addr < (mrd->mr_base + mrd->mr_len)))
                        return (mrd);
        return (NULL);
}

/*
 * Try to satisfy the given range request by manipulating the fixed
 * MTRRs that cover low memory.
 *
 * Note that we try to be generous here; we'll bloat the range out to
 * the next higher/lower boundary to avoid the consumer having to know
 * too much about the mechanisms here.
 *
 * XXX note that this will have to be updated when we start supporting
 * "busy" ranges.
 */
static int
amd64_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
        struct mem_range_desc *first_md, *last_md, *curr_md;

        /* Range check. */
        if (((first_md = amd64_mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
            ((last_md = amd64_mtrrfixsearch(sc,
            mrd->mr_base + mrd->mr_len - 1)) == NULL))
                return (EINVAL);

        /* Check that we aren't doing something risky. */
        if (!(mrd->mr_flags & MDF_FORCE))
                for (curr_md = first_md; curr_md <= last_md; curr_md++) {
                        if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
                                return (EACCES);
                }

        /* Set flags, clear set-by-firmware flag. */
        for (curr_md = first_md; curr_md <= last_md; curr_md++) {
                curr_md->mr_flags = mrcopyflags(curr_md->mr_flags &
                    ~MDF_FIRMWARE, mrd->mr_flags);
                bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
        }

        return (0);
}

/*
 * Modify/add a variable MTRR to satisfy the request.
 *
 * XXX needs to be updated to properly support "busy" ranges.
 */
static int
amd64_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
    int *arg)
{
        struct mem_range_desc *curr_md, *free_md;
        int i;

        /*
         * Scan the currently active variable descriptors, look for
         * one we exactly match (straight takeover) and for possible
         * accidental overlaps.
         *
         * Keep track of the first empty variable descriptor in case
         * we can't perform a takeover.
         */
        i = (sc->mr_cap & MR686_FIXMTRR) ?
            MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
        curr_md = sc->mr_desc + i;
        free_md = NULL;
        for (; i < sc->mr_ndesc; i++, curr_md++) {
                if (curr_md->mr_flags & MDF_ACTIVE) {
                        /* Exact match? */
                        if ((curr_md->mr_base == mrd->mr_base) &&
                            (curr_md->mr_len == mrd->mr_len)) {
                                /* Whoops, owned by someone. */
                                if (curr_md->mr_flags & MDF_BUSY)
                                        return (EBUSY);

                                /* Check that we aren't doing something risky. */
                                if (!(mrd->mr_flags & MDF_FORCE) &&
                                    ((curr_md->mr_flags & MDF_ATTRMASK) ==
                                    MDF_UNKNOWN))
                                        return (EACCES);

                                /* Ok, just hijack this entry. */
                                free_md = curr_md;
                                break;
                        }

                        /* Non-exact overlap? */
                        if (mroverlap(curr_md, mrd)) {
                                /* Between conflicting region types? */
                                if (amd64_mtrrconflict(curr_md->mr_flags,
                                    mrd->mr_flags))
                                        return (EINVAL);
                        }
                } else if (free_md == NULL) {
                        free_md = curr_md;
                }
        }

        /* Got somewhere to put it? */
        if (free_md == NULL)
                return (ENOSPC);

        /* Set up new descriptor. */
        free_md->mr_base = mrd->mr_base;
        free_md->mr_len = mrd->mr_len;
        free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
        bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
        return (0);
}

/*
 * Handle requests to set memory range attributes by manipulating MTRRs.
 */
static int
amd64_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
        struct mem_range_desc *targ;
        int error = 0;

        switch (*arg) {
        case MEMRANGE_SET_UPDATE:
                /*
                 * Make sure that what's being asked for is even
                 * possible at all.
                 */
                if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
                    amd64_mtrrtype(mrd->mr_flags) == -1)
                        return (EINVAL);

#define FIXTOP ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
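                /* FIXTOP evaluates to 0x100000: the fixed MTRRs cover exactly the low 1MB. */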

                /* Are the "low memory" conditions applicable? */
                if ((sc->mr_cap & MR686_FIXMTRR) &&
                    ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
                        if ((error = amd64_mrsetlow(sc, mrd, arg)) != 0)
                                return (error);
                } else {
                        /* It's time to play with variable MTRRs. */
                        if ((error = amd64_mrsetvariable(sc, mrd, arg)) != 0)
                                return (error);
                }
                break;

        case MEMRANGE_SET_REMOVE:
                if ((targ = mem_range_match(sc, mrd)) == NULL)
                        return (ENOENT);
                if (targ->mr_flags & MDF_FIXACTIVE)
                        return (EPERM);
                if (targ->mr_flags & MDF_BUSY)
                        return (EBUSY);
                targ->mr_flags &= ~MDF_ACTIVE;
                targ->mr_owner[0] = 0;
                break;

        default:
                return (EOPNOTSUPP);
        }

        /* Update the hardware. */
        amd64_mrstore(sc);

        /* Refetch to see where we're at. */
        amd64_mrfetch(sc);
        return (0);
}

/*
 * Work out how many ranges we support, initialise storage for them,
 * and fetch the initial settings.
 */
static void
amd64_mrinit(struct mem_range_softc *sc)
{
        struct mem_range_desc *mrd;
        u_int regs[4];
        int i, nmdesc = 0, pabits;

        mtrrcap = rdmsr(MSR_MTRRcap);
        mtrrdef = rdmsr(MSR_MTRRdefType);

        /* For now, bail out if MTRRs are not enabled. */
        if (!(mtrrdef & MTRR_DEF_ENABLE)) {
                if (bootverbose)
                        printf("CPU supports MTRRs but they are not enabled\n");
                return;
        }
        nmdesc = mtrrcap & MTRR_CAP_VCNT;
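        /* MTRR_CAP_VCNT masks the low byte of MTRRcap, the variable-range count. */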

        /*
         * Determine the size of the PhysMask and PhysBase fields in
         * the variable-range MTRRs.  If extended CPUID leaf 0x80000008
         * is present, use it to find out how many physical address
         * bits the CPU supports.  Otherwise, default to 36 address
         * bits.
         */
        if (cpu_exthigh >= 0x80000008) {
                do_cpuid(0x80000008, regs);
                pabits = regs[0] & 0xff;
        } else
                pabits = 36;
        mtrr_physmask = ((1UL << pabits) - 1) & ~0xfffUL;
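        /* e.g. pabits == 40 yields mtrr_physmask == 0x000000fffffff000. */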

        /* If fixed MTRRs are supported and enabled, account for them. */
        if ((mtrrcap & MTRR_CAP_FIXED) && (mtrrdef & MTRR_DEF_FIXED_ENABLE)) {
                sc->mr_cap = MR686_FIXMTRR;
                nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
        }

        sc->mr_desc = malloc(nmdesc * sizeof(struct mem_range_desc),
            M_MEMDESC, M_WAITOK | M_ZERO);
        sc->mr_ndesc = nmdesc;

        mrd = sc->mr_desc;

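        /*
         * The fixed MTRRs carve the first megabyte into eight 64K
         * ranges (0x00000-0x7ffff), sixteen 16K ranges
         * (0x80000-0xbffff) and sixty-four 4K ranges
         * (0xc0000-0xfffff), which is what the loops below lay out.
         */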
        /* Populate the fixed MTRR entries' base/length. */
        if (sc->mr_cap & MR686_FIXMTRR) {
                for (i = 0; i < MTRR_N64K; i++, mrd++) {
                        mrd->mr_base = i * 0x10000;
                        mrd->mr_len = 0x10000;
                        mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
                            MDF_FIXACTIVE;
                }
                for (i = 0; i < MTRR_N16K; i++, mrd++) {
                        mrd->mr_base = i * 0x4000 + 0x80000;
                        mrd->mr_len = 0x4000;
                        mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
                            MDF_FIXACTIVE;
                }
                for (i = 0; i < MTRR_N4K; i++, mrd++) {
                        mrd->mr_base = i * 0x1000 + 0xc0000;
                        mrd->mr_len = 0x1000;
                        mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
                            MDF_FIXACTIVE;
                }
        }

        /*
         * Get current settings; anything set now is considered to
         * have been set by the firmware.  (XXX has something already
         * played here?)
         */
        amd64_mrfetch(sc);
        mrd = sc->mr_desc;
        for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
                if (mrd->mr_flags & MDF_ACTIVE)
                        mrd->mr_flags |= MDF_FIRMWARE;
        }
}

/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
 */
static void
amd64_mrAPinit(struct mem_range_softc *sc)
{

        amd64_mrstoreone(sc);
        wrmsr(MSR_MTRRdefType, mtrrdef);
}

static void
amd64_mem_drvinit(void *unused)
{

        if (mtrrs_disabled)
                return;
        if (!(cpu_feature & CPUID_MTRR))
                return;
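        /* Accept only family 6 (P6-class) and family 0xf parts. */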
        if ((cpu_id & 0xf00) != 0x600 && (cpu_id & 0xf00) != 0xf00)
                return;
        switch (cpu_vendor_id) {
        case CPU_VENDOR_INTEL:
        case CPU_VENDOR_AMD:
                break;
        case CPU_VENDOR_CENTAUR:
                if (cpu_exthigh >= 0x80000008)
                        break;
                /* FALLTHROUGH */
        default:
                return;
        }
        mem_range_softc.mr_op = &amd64_mrops;
}
SYSINIT(amd64memdev, SI_SUB_DRIVERS, SI_ORDER_FIRST, amd64_mem_drvinit, NULL);