/*-
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
/*
 * amd64 memory range operations
 *
 * This code will probably be impenetrable without reference to the
 * Intel Pentium Pro documentation or the x86-64 programmer's manual,
 * volume 2.
 */
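
/*
 * Overview: the fixed-range MTRRs, when present, describe the low 1MB
 * of physical memory (eight 64K ranges from 0, sixteen 16K ranges from
 * 0x80000, and sixty-four 4K ranges from 0xc0000), while each variable
 * MTRR (count taken from the MTRRcap VCNT field) describes one
 * power-of-two-sized, naturally aligned range anywhere in the physical
 * address space.  Consumers normally reach this code through the
 * machine-independent memory range interface, which dispatches via the
 * amd64_mrops table registered below.
 */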

static char *mem_owner_bios = "BIOS";

#define	MR686_FIXMTRR	(1<<0)

#define	mrwithin(mr, a)							\
	(((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
#define	mroverlap(mra, mrb)						\
	(mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))

#define	mrvalid(base, len)						\
	((!(base & ((1 << 12) - 1))) &&	/* base is multiple of 4k */	\
	    ((len) >= (1 << 12)) &&	/* length is >= 4k */		\
	    powerof2((len)) &&		/* ... and power of two */	\
	    !((base) & ((len) - 1)))	/* range is not discontinuous */
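
/*
 * Example: mrvalid(0xd0000000, 0x8000000) holds (4K-aligned base, 128MB
 * power-of-two length, base aligned on the length boundary), while
 * mrvalid(0xd0000000, 0x30000000) fails the powerof2() check.
 */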

#define	mrcopyflags(curr, new)						\
	(((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))

static int mtrrs_disabled;
TUNABLE_INT("machdep.disable_mtrrs", &mtrrs_disabled);
SYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RDTUN,
    &mtrrs_disabled, 0, "Disable amd64 MTRRs.");

static void	amd64_mrinit(struct mem_range_softc *sc);
static int	amd64_mrset(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);
static void	amd64_mrAPinit(struct mem_range_softc *sc);
static void	amd64_mrreinit(struct mem_range_softc *sc);

static struct mem_range_ops amd64_mrops = {
	amd64_mrinit,
	amd64_mrset,
	amd64_mrAPinit,
	amd64_mrreinit
};

/* XXX for AP startup hook */
static u_int64_t mtrrcap, mtrrdef;

/* The bitmask for the PhysBase and PhysMask fields of the variable MTRRs. */
static u_int64_t mtrr_physmask;

static struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd);
static void	amd64_mrfetch(struct mem_range_softc *sc);
static int	amd64_mtrrtype(int flags);
static int	amd64_mrt2mtrr(int flags, int oldval);
static int	amd64_mtrrconflict(int flag1, int flag2);
static void	amd64_mrstore(struct mem_range_softc *sc);
static void	amd64_mrstoreone(void *arg);
static struct mem_range_desc *amd64_mtrrfixsearch(struct mem_range_softc *sc,
		    u_int64_t addr);
static int	amd64_mrsetlow(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);
static int	amd64_mrsetvariable(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);

/* amd64 MTRR type to memory range type conversion */
static int amd64_mtrrtomrt[] = {
	MDF_UNCACHEABLE,
	MDF_WRITECOMBINE,
	MDF_UNKNOWN,
	MDF_UNKNOWN,
	MDF_WRITETHROUGH,
	MDF_WRITEPROTECT,
	MDF_WRITEBACK
};

#define	MTRRTOMRTLEN	(sizeof(amd64_mtrrtomrt) / sizeof(amd64_mtrrtomrt[0]))
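
/*
 * The index into amd64_mtrrtomrt[] is the hardware MTRR type encoding
 * (Intel SDM vol. 3 / AMD APM vol. 2): 0 = uncacheable, 1 =
 * write-combining, 2 and 3 = reserved, 4 = write-through, 5 =
 * write-protect, 6 = write-back.
 */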

static int
amd64_mtrr2mrt(int val)
{

	if (val < 0 || val >= MTRRTOMRTLEN)
		return (MDF_UNKNOWN);
	return (amd64_mtrrtomrt[val]);
}

/*
 * amd64 MTRR conflicts.  Writeback and uncacheable may overlap.
 */
static int
amd64_mtrrconflict(int flag1, int flag2)
{

	flag1 &= MDF_ATTRMASK;
	flag2 &= MDF_ATTRMASK;
	if ((flag1 & MDF_UNKNOWN) || (flag2 & MDF_UNKNOWN))
		return (1);
	if (flag1 == flag2 ||
	    (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
	    (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
		return (0);
	return (1);
}

/*
 * Look for an exactly-matching range.
 */
static struct mem_range_desc *
mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
{
	struct mem_range_desc *cand;
	int i;

	for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
		if ((cand->mr_base == mrd->mr_base) &&
		    (cand->mr_len == mrd->mr_len))
			return (cand);
	return (NULL);
}

/*
 * Fetch the current mtrr settings from the current CPU (assumed to
 * all be in sync in the SMP case).  Note that if we are here, we
 * assume that MTRRs are enabled, and we may or may not have fixed
 * MTRRs.
 */
static void
amd64_mrfetch(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	u_int64_t msrv;
	int i, j, msr;

	mrd = sc->mr_desc;

	/* Get fixed-range MTRRs. */
	if (sc->mr_cap & MR686_FIXMTRR) {
		msr = MSR_MTRR64kBase;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
	}

	/* Get remainder which must be variable MTRRs. */
	msr = MSR_MTRRVarBase;
	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
		msrv = rdmsr(msr);
		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
		    amd64_mtrr2mrt(msrv & MTRR_PHYSBASE_TYPE);
		mrd->mr_base = msrv & mtrr_physmask;
		msrv = rdmsr(msr + 1);
		mrd->mr_flags = (msrv & MTRR_PHYSMASK_VALID) ?
		    (mrd->mr_flags | MDF_ACTIVE) :
		    (mrd->mr_flags & ~MDF_ACTIVE);

		/* Compute the range from the mask. Ick. */
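		/*
		 * ~mask gives the "don't care" address bits; confining
		 * those to the implemented physical address bits and
		 * adding one yields the length.  For example, with
		 * 36-bit addressing a mask of 0xffff00000 decodes to
		 * ~mask & 0xfffffffff = 0xfffff, i.e. a 1MB range.
		 */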
		mrd->mr_len = (~(msrv & mtrr_physmask) &
		    (mtrr_physmask | 0xfffL)) + 1;
		if (!mrvalid(mrd->mr_base, mrd->mr_len))
			mrd->mr_flags |= MDF_BOGUS;

		/* If unclaimed and active, must be the BIOS. */
		if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
			strcpy(mrd->mr_owner, mem_owner_bios);
	}
}

/*
 * Return the MTRR memory type matching a region's flags
 */
static int
amd64_mtrrtype(int flags)
{
	int i;

	flags &= MDF_ATTRMASK;

	for (i = 0; i < MTRRTOMRTLEN; i++) {
		if (amd64_mtrrtomrt[i] == MDF_UNKNOWN)
			continue;
		if (flags == amd64_mtrrtomrt[i])
			return (i);
	}
	return (-1);
}

static int
amd64_mrt2mtrr(int flags, int oldval)
{
	int val;

	if ((val = amd64_mtrrtype(flags)) == -1)
		return (oldval & 0xff);
	return (val & 0xff);
}

/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
amd64_mrstore(struct mem_range_softc *sc)
{
#ifdef SMP
	/*
	 * We should use ipi_all_but_self() to call other CPUs into a
	 * locking gate, then call a target function to do this work.
	 * The "proper" solution involves a generalised locking gate
	 * implementation, not ready yet.
	 */
	smp_rendezvous(NULL, amd64_mrstoreone, NULL, sc);
#else
	disable_intr();				/* disable interrupts */
	amd64_mrstoreone(sc);
	enable_intr();
#endif
}

/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than just
 * stuffing one entry; this is simpler (but slower, of course).
 */
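/*
 * The sequence below (disable PGE, disable caches, flush, disable
 * MTRRs, reprogram, flush again, re-enable) follows the procedure the
 * processor vendors document for safely changing memory types.
 */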
static void
amd64_mrstoreone(void *arg)
{
	struct mem_range_softc *sc = arg;
	struct mem_range_desc *mrd;
	u_int64_t omsrv, msrv;
	int i, j, msr;
	u_int cr4save;

	mrd = sc->mr_desc;

	/* Disable PGE. */
	cr4save = rcr4();
	if (cr4save & CR4_PGE)
		load_cr4(cr4save & ~CR4_PGE);

	/* Disable caches (CD = 1, NW = 0). */
	load_cr0((rcr0() & ~CR0_NW) | CR0_CD);

	/* Flushes caches and TLBs. */
	wbinvd();

	/* Disable MTRRs (E = 0). */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_DEF_ENABLE);

	/* Set fixed-range MTRRs. */
	if (sc->mr_cap & MR686_FIXMTRR) {
		msr = MSR_MTRR64kBase;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
	}

	/* Set remainder which must be variable MTRRs. */
	msr = MSR_MTRRVarBase;
	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
		/* base/type register */
		omsrv = rdmsr(msr);
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = mrd->mr_base & mtrr_physmask;
			msrv |= amd64_mrt2mtrr(mrd->mr_flags, omsrv);
		} else {
			msrv = 0;
		}
		wrmsr(msr, msrv);

		/* mask/active register */
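		/*
		 * For a power-of-two length, ~(len - 1) sets every
		 * address bit above the range, so the mask matches
		 * exactly the addresses within it.
		 */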
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = MTRR_PHYSMASK_VALID |
			    (~(mrd->mr_len - 1) & mtrr_physmask);
		} else {
			msrv = 0;
		}
		wrmsr(msr + 1, msrv);
	}

	/* Flush caches, TLBs. */
	wbinvd();

	/* Enable MTRRs. */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_DEF_ENABLE);

	/* Enable caches (CD = 0, NW = 0). */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));

	/* Restore PGE. */
	load_cr4(cr4save);
}

/*
 * Hunt for the fixed MTRR referencing (addr)
 */
static struct mem_range_desc *
amd64_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
	struct mem_range_desc *mrd;
	int i;

	for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K);
	     i++, mrd++)
		if ((addr >= mrd->mr_base) &&
		    (addr < (mrd->mr_base + mrd->mr_len)))
			return (mrd);
	return (NULL);
}

/*
 * Try to satisfy the given range request by manipulating the fixed
 * MTRRs that cover low memory.
 *
 * Note that we try to be generous here; we'll bloat the range out to
 * the next higher/lower boundary to avoid the consumer having to know
 * too much about the mechanisms here.
 *
 * XXX note that this will have to be updated when we start supporting
 * "busy" ranges.
 */
static int
amd64_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *first_md, *last_md, *curr_md;

	/* Range check. */
	if (((first_md = amd64_mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
	    ((last_md = amd64_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL))
		return (EINVAL);

	/* Check that we aren't doing something risky. */
	if (!(mrd->mr_flags & MDF_FORCE))
		for (curr_md = first_md; curr_md <= last_md; curr_md++) {
			if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
				return (EACCES);
		}

	/* Set flags, clear set-by-firmware flag. */
	for (curr_md = first_md; curr_md <= last_md; curr_md++) {
		curr_md->mr_flags = mrcopyflags(curr_md->mr_flags &
		    ~MDF_FIRMWARE, mrd->mr_flags);
		bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
	}

	return (0);
}

/*
 * Modify/add a variable MTRR to satisfy the request.
 *
 * XXX needs to be updated to properly support "busy" ranges.
 */
static int
amd64_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
    int *arg)
{
	struct mem_range_desc *curr_md, *free_md;
	int i;

	/*
	 * Scan the currently active variable descriptors, look for
	 * one we exactly match (straight takeover) and for possible
	 * accidental overlaps.
	 *
	 * Keep track of the first empty variable descriptor in case
	 * we can't perform a takeover.
	 */
	i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	curr_md = sc->mr_desc + i;
	free_md = NULL;
	for (; i < sc->mr_ndesc; i++, curr_md++) {
		if (curr_md->mr_flags & MDF_ACTIVE) {
			/* Exact match? */
			if ((curr_md->mr_base == mrd->mr_base) &&
			    (curr_md->mr_len == mrd->mr_len)) {

				/* Whoops, owned by someone. */
				if (curr_md->mr_flags & MDF_BUSY)
					return (EBUSY);

				/* Check that we aren't doing something risky */
				if (!(mrd->mr_flags & MDF_FORCE) &&
				    ((curr_md->mr_flags & MDF_ATTRMASK) ==
				    MDF_UNKNOWN))
					return (EACCES);

				/* Ok, just hijack this entry. */
				free_md = curr_md;
				break;
			}

			/* Non-exact overlap? */
			if (mroverlap(curr_md, mrd)) {
				/* Between conflicting region types? */
				if (amd64_mtrrconflict(curr_md->mr_flags,
				    mrd->mr_flags))
					return (EINVAL);
			}
		} else if (free_md == NULL) {
			free_md = curr_md;
		}
	}

	/* Got somewhere to put it? */
	if (free_md == NULL)
		return (ENOSPC);

	/* Set up new descriptor. */
	free_md->mr_base = mrd->mr_base;
	free_md->mr_len = mrd->mr_len;
	free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
	bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
	return (0);
}

/*
 * Handle requests to set memory range attributes by manipulating MTRRs.
 */
static int
amd64_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *targ;
	int error, i;

	switch (*arg) {
	case MEMRANGE_SET_UPDATE:
		/*
		 * Make sure that what's being asked for is even
		 * possible at all.
		 */
		if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
		    amd64_mtrrtype(mrd->mr_flags) == -1)
			return (EINVAL);

#define	FIXTOP	((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
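/* FIXTOP works out to 0x100000: the fixed MTRRs end at exactly 1MB. */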

		/* Are the "low memory" conditions applicable? */
		if ((sc->mr_cap & MR686_FIXMTRR) &&
		    ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
			if ((error = amd64_mrsetlow(sc, mrd, arg)) != 0)
				return (error);
		} else {
			/* It's time to play with variable MTRRs. */
			if ((error = amd64_mrsetvariable(sc, mrd, arg)) != 0)
				return (error);
		}
		break;

	case MEMRANGE_SET_REMOVE:
		if ((targ = mem_range_match(sc, mrd)) == NULL)
			return (ENOENT);
		if (targ->mr_flags & MDF_FIXACTIVE)
			return (EPERM);
		if (targ->mr_flags & MDF_BUSY)
			return (EBUSY);
		targ->mr_flags &= ~MDF_ACTIVE;
		targ->mr_owner[0] = 0;
		break;

	default:
		return (EOPNOTSUPP);
	}

	/*
	 * Ensure that the direct map region does not contain any mappings
	 * that span MTRRs of different types.  However, the fixed MTRRs can
	 * be ignored, because a large page mapping the first 1 MB of physical
	 * memory is a special case that the processor handles.  The entire
	 * TLB will be invalidated by amd64_mrstore(), so pmap_demote_DMAP()
	 * needn't do it.
	 */
	i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	mrd = sc->mr_desc + i;
	for (; i < sc->mr_ndesc; i++, mrd++) {
		if ((mrd->mr_flags & (MDF_ACTIVE | MDF_BOGUS)) == MDF_ACTIVE)
			pmap_demote_DMAP(mrd->mr_base, mrd->mr_len, FALSE);
	}

	/* Update the hardware. */
	amd64_mrstore(sc);

	/* Refetch to see where we're at. */
	amd64_mrfetch(sc);
	return (0);
}

/*
 * Work out how many ranges we support, initialise storage for them,
 * and fetch the initial settings.
 */
static void
amd64_mrinit(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	u_int regs[4];
	int i, nmdesc = 0, pabits;

	mtrrcap = rdmsr(MSR_MTRRcap);
	mtrrdef = rdmsr(MSR_MTRRdefType);

	/* For now, bail out if MTRRs are not enabled. */
	if (!(mtrrdef & MTRR_DEF_ENABLE)) {
		if (bootverbose)
			printf("CPU supports MTRRs but not enabled\n");
		return;
	}
	nmdesc = mtrrcap & MTRR_CAP_VCNT;

	/*
	 * Determine the size of the PhysMask and PhysBase fields in
	 * the variable range MTRRs.  If the extended CPUID 0x80000008
	 * is present, use that to figure out how many physical
	 * address bits the CPU supports.  Otherwise, default to 36
	 * address bits.
	 */
	if (cpu_exthigh >= 0x80000008) {
		do_cpuid(0x80000008, regs);
		pabits = regs[0] & 0xff;
	} else
		pabits = 36;
	mtrr_physmask = ((1UL << pabits) - 1) & ~0xfffUL;
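	/* E.g., pabits == 36 gives mtrr_physmask == 0xffffff000. */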

	/* If fixed MTRRs supported and enabled. */
	if ((mtrrcap & MTRR_CAP_FIXED) && (mtrrdef & MTRR_DEF_FIXED_ENABLE)) {
		sc->mr_cap = MR686_FIXMTRR;
		nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
	}

	sc->mr_desc = malloc(nmdesc * sizeof(struct mem_range_desc), M_MEMDESC,
	    M_WAITOK | M_ZERO);
	sc->mr_ndesc = nmdesc;

	mrd = sc->mr_desc;

	/* Populate the fixed MTRR entries' base/length. */
	if (sc->mr_cap & MR686_FIXMTRR) {
		for (i = 0; i < MTRR_N64K; i++, mrd++) {
			mrd->mr_base = i * 0x10000;
			mrd->mr_len = 0x10000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N16K; i++, mrd++) {
			mrd->mr_base = i * 0x4000 + 0x80000;
			mrd->mr_len = 0x4000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N4K; i++, mrd++) {
			mrd->mr_base = i * 0x1000 + 0xc0000;
			mrd->mr_len = 0x1000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
	}

	/*
	 * Get current settings, anything set now is considered to
	 * have been set by the firmware.  (XXX has something already
	 * played here?)
	 */
	amd64_mrfetch(sc);
	mrd = sc->mr_desc;
	for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
		if (mrd->mr_flags & MDF_ACTIVE)
			mrd->mr_flags |= MDF_FIRMWARE;
	}

	/*
	 * Ensure that the direct map region does not contain any mappings
	 * that span MTRRs of different types.  However, the fixed MTRRs can
	 * be ignored, because a large page mapping the first 1 MB of physical
	 * memory is a special case that the processor handles.  Invalidate
	 * any old TLB entries that might hold inconsistent memory type
	 * information.
	 */
	i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	mrd = sc->mr_desc + i;
	for (; i < sc->mr_ndesc; i++, mrd++) {
		if ((mrd->mr_flags & (MDF_ACTIVE | MDF_BOGUS)) == MDF_ACTIVE)
			pmap_demote_DMAP(mrd->mr_base, mrd->mr_len, TRUE);
	}
}

/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
 */
static void
amd64_mrAPinit(struct mem_range_softc *sc)
{

	amd64_mrstoreone(sc);
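	/* Propagate the BSP's default-type register, captured at init. */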
	wrmsr(MSR_MTRRdefType, mtrrdef);
}

/*
 * Re-initialise running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
amd64_mrreinit(struct mem_range_softc *sc)
{
#ifdef SMP
	/*
	 * We should use ipi_all_but_self() to call other CPUs into a
	 * locking gate, then call a target function to do this work.
	 * The "proper" solution involves a generalised locking gate
	 * implementation, not ready yet.
	 */
	smp_rendezvous(NULL, (void *)amd64_mrAPinit, NULL, sc);
#else
	disable_intr();				/* disable interrupts */
	amd64_mrAPinit(sc);
	enable_intr();
#endif
}

static void
amd64_mem_drvinit(void *unused)
{

	if (mtrrs_disabled)
		return;
	if (!(cpu_feature & CPUID_MTRR))
		return;
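	/* cpu_id bits 8-11 hold the family; only 6 and 0xf are handled. */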
	if ((cpu_id & 0xf00) != 0x600 && (cpu_id & 0xf00) != 0xf00)
		return;
	switch (cpu_vendor_id) {
	case CPU_VENDOR_INTEL:
	case CPU_VENDOR_AMD:
	case CPU_VENDOR_CENTAUR:
		break;
	default:
		return;
	}
	mem_range_softc.mr_op = &amd64_mrops;
}
SYSINIT(amd64memdev, SI_SUB_DRIVERS, SI_ORDER_FIRST, amd64_mem_drvinit, NULL);
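
/*
 * Usage sketch (values are hypothetical): a driver wanting a
 * write-combined aperture would normally go through the
 * machine-independent mem_range_attr_set() wrapper rather than call
 * the amd64_mrops entries directly, roughly as follows:
 *
 *	struct mem_range_desc mrd;
 *	int arg, error;
 *
 *	mrd.mr_base = 0xd0000000;	(hypothetical aperture base)
 *	mrd.mr_len = 0x1000000;		(16MB: power of two, aligned)
 *	mrd.mr_flags = MDF_WRITECOMBINE;
 *	strlcpy(mrd.mr_owner, "mydrv", sizeof(mrd.mr_owner));
 *	arg = MEMRANGE_SET_UPDATE;
 *	error = mem_range_attr_set(&mrd, &arg);
 */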