/*-
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.2/sys/amd64/amd64/amd64_mem.c 217506 2011-01-17 17:30:35Z jkim $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

/*
 * amd64 memory range operations
 *
 * This code will probably be impenetrable without reference to the
 * Intel Pentium Pro documentation or the x86-64 programmer's manual vol. 2.
 */

static char *mem_owner_bios = "BIOS";

#define	MR686_FIXMTRR	(1<<0)

#define	mrwithin(mr, a)							\
    (((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
#define	mroverlap(mra, mrb)						\
    (mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))

#define	mrvalid(base, len)						\
    ((!(base & ((1 << 12) - 1))) &&	/* base is multiple of 4k */	\
    ((len) >= (1 << 12)) &&		/* length is >= 4k */		\
    powerof2((len)) &&			/* ... and power of two */	\
    !((base) & ((len) - 1)))		/* range is not discontinuous */
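/*
 * For example, mrvalid accepts base 0x100000 with len 0x40000 (4k-aligned
 * base, power-of-two length >= 4k, base a multiple of the length) but
 * rejects base 0x101000 with the same length, since 0x101000 & 0x3ffff
 * is non-zero.
 */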

#define	mrcopyflags(curr, new)						\
    (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))

static int mtrrs_disabled;
TUNABLE_INT("machdep.disable_mtrrs", &mtrrs_disabled);
SYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RDTUN,
    &mtrrs_disabled, 0, "Disable amd64 MTRRs.");

static void	amd64_mrinit(struct mem_range_softc *sc);
static int	amd64_mrset(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);
static void	amd64_mrAPinit(struct mem_range_softc *sc);
static void	amd64_mrreinit(struct mem_range_softc *sc);

static struct mem_range_ops amd64_mrops = {
	amd64_mrinit,
	amd64_mrset,
	amd64_mrAPinit,
	amd64_mrreinit
};

/* XXX for AP startup hook */
static u_int64_t mtrrcap, mtrrdef;

/* The bitmask for the PhysBase and PhysMask fields of the variable MTRRs. */
static u_int64_t mtrr_physmask;

static struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd);
static void	amd64_mrfetch(struct mem_range_softc *sc);
static int	amd64_mtrrtype(int flags);
static int	amd64_mrt2mtrr(int flags, int oldval);
static int	amd64_mtrrconflict(int flag1, int flag2);
static void	amd64_mrstore(struct mem_range_softc *sc);
static void	amd64_mrstoreone(void *arg);
static struct mem_range_desc *amd64_mtrrfixsearch(struct mem_range_softc *sc,
		    u_int64_t addr);
static int	amd64_mrsetlow(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);
static int	amd64_mrsetvariable(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);

/* amd64 MTRR type to memory range type conversion */
static int amd64_mtrrtomrt[] = {
	MDF_UNCACHEABLE,	/* 0: UC */
	MDF_WRITECOMBINE,	/* 1: WC */
	MDF_UNKNOWN,		/* 2: reserved */
	MDF_UNKNOWN,		/* 3: reserved */
	MDF_WRITETHROUGH,	/* 4: WT */
	MDF_WRITEPROTECT,	/* 5: WP */
	MDF_WRITEBACK		/* 6: WB */
};

#define	MTRRTOMRTLEN (sizeof(amd64_mtrrtomrt) / sizeof(amd64_mtrrtomrt[0]))

static int
amd64_mtrr2mrt(int val)
{

	if (val < 0 || val >= MTRRTOMRTLEN)
		return (MDF_UNKNOWN);
	return (amd64_mtrrtomrt[val]);
}

/*
 * amd64 MTRR conflicts.  Writeback and uncacheable may overlap.
 */
static int
amd64_mtrrconflict(int flag1, int flag2)
{

	flag1 &= MDF_ATTRMASK;
	flag2 &= MDF_ATTRMASK;
	if ((flag1 & MDF_UNKNOWN) || (flag2 & MDF_UNKNOWN))
		return (1);
	if (flag1 == flag2 ||
	    (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
	    (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
		return (0);
	return (1);
}

/*
 * Look for an exactly-matching range.
 */
static struct mem_range_desc *
mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
{
	struct mem_range_desc *cand;
	int i;

	for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
		if ((cand->mr_base == mrd->mr_base) &&
		    (cand->mr_len == mrd->mr_len))
			return (cand);
	return (NULL);
}

/*
 * Fetch the current mtrr settings from the current CPU (assumed to
 * all be in sync in the SMP case).  Note that if we are here, we
 * assume that MTRRs are enabled, and we may or may not have fixed
 * MTRRs.
 */
static void
amd64_mrfetch(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	u_int64_t msrv;
	int i, j, msr;

	mrd = sc->mr_desc;

	/* Get fixed-range MTRRs. */
	if (sc->mr_cap & MR686_FIXMTRR) {
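		/*
		 * Each fixed-range MTRR MSR packs eight 8-bit type
		 * fields, with the lowest-addressed sub-range in the
		 * least significant byte, which is why the loops
		 * below peel off one byte per descriptor.
		 */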
		msr = MSR_MTRR64kBase;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
	}

	/* Get remainder which must be variable MTRRs. */
	msr = MSR_MTRRVarBase;
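	/* Variable MTRRs come in (PhysBase, PhysMask) MSR pairs, hence +2. */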
	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
		msrv = rdmsr(msr);
		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
		    amd64_mtrr2mrt(msrv & MTRR_PHYSBASE_TYPE);
		mrd->mr_base = msrv & mtrr_physmask;
		msrv = rdmsr(msr + 1);
		mrd->mr_flags = (msrv & MTRR_PHYSMASK_VALID) ?
		    (mrd->mr_flags | MDF_ACTIVE) :
		    (mrd->mr_flags & ~MDF_ACTIVE);

		/* Compute the range from the mask. Ick. */
		mrd->mr_len = (~(msrv & mtrr_physmask) &
		    (mtrr_physmask | 0xfffL)) + 1;
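		/*
		 * Example with 36 physical address bits: a PhysMask of
		 * 0xfff000000 (bits 35..24 set) gives
		 * ~mask & (mtrr_physmask | 0xfff) = 0xffffff, so
		 * mr_len = 0x1000000, a 16MB range.
		 */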
		if (!mrvalid(mrd->mr_base, mrd->mr_len))
			mrd->mr_flags |= MDF_BOGUS;

		/* If unclaimed and active, must be the BIOS. */
		if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
			strcpy(mrd->mr_owner, mem_owner_bios);
	}
}

/*
 * Return the MTRR memory type matching a region's flags
 */
static int
amd64_mtrrtype(int flags)
{
	int i;

	flags &= MDF_ATTRMASK;

	for (i = 0; i < MTRRTOMRTLEN; i++) {
		if (amd64_mtrrtomrt[i] == MDF_UNKNOWN)
			continue;
		if (flags == amd64_mtrrtomrt[i])
			return (i);
	}
	return (-1);
}

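/*
 * Convert memory range flags to an MTRR type, keeping the current
 * hardware type when the flags have no MTRR equivalent.
 */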
static int
amd64_mrt2mtrr(int flags, int oldval)
{
	int val;

	if ((val = amd64_mtrrtype(flags)) == -1)
		return (oldval & 0xff);
	return (val & 0xff);
}

/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
amd64_mrstore(struct mem_range_softc *sc)
{
#ifdef SMP
	/*
	 * We should use ipi_all_but_self() to call other CPUs into a
	 * locking gate, then call a target function to do this work.
	 * The "proper" solution involves a generalised locking gate
	 * implementation, not ready yet.
	 */
	smp_rendezvous(NULL, amd64_mrstoreone, NULL, sc);
#else
	disable_intr();				/* disable interrupts */
	amd64_mrstoreone(sc);
	enable_intr();
#endif
}

/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than just
 * stuffing one entry; this is simpler (but slower, of course).
 */
static void
amd64_mrstoreone(void *arg)
{
	struct mem_range_softc *sc = arg;
	struct mem_range_desc *mrd;
	u_int64_t omsrv, msrv;
	int i, j, msr;
	u_long cr0, cr4;

	mrd = sc->mr_desc;

	critical_enter();

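	/*
	 * The sequence below (disable PGE, set CD to disable caching,
	 * flush caches and TLBs, disable MTRRs, rewrite them, flush
	 * again, then re-enable everything) follows the MTRR update
	 * procedure documented in the Intel and AMD manuals.
	 */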
	/* Disable PGE. */
	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);

	/* Disable caches (CD = 1, NW = 0). */
	cr0 = rcr0();
	load_cr0((cr0 & ~CR0_NW) | CR0_CD);

	/* Flush caches and TLBs. */
	wbinvd();
	invltlb();

	/* Disable MTRRs (E = 0). */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_DEF_ENABLE);

	/* Set fixed-range MTRRs. */
	if (sc->mr_cap & MR686_FIXMTRR) {
		msr = MSR_MTRR64kBase;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
	}

	/* Set remainder which must be variable MTRRs. */
	msr = MSR_MTRRVarBase;
	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
		/* base/type register */
		omsrv = rdmsr(msr);
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = mrd->mr_base & mtrr_physmask;
			msrv |= amd64_mrt2mtrr(mrd->mr_flags, omsrv);
		} else {
			msrv = 0;
		}
		wrmsr(msr, msrv);

		/* mask/active register */
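		/*
		 * For a power-of-two length the mask is the complement
		 * of (len - 1) within the implemented address bits;
		 * e.g. a 16MB range with 36 address bits yields a
		 * PhysMask of 0xfff000000.
		 */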
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = MTRR_PHYSMASK_VALID |
			    (~(mrd->mr_len - 1) & mtrr_physmask);
		} else {
			msrv = 0;
		}
		wrmsr(msr + 1, msrv);
	}

	/* Flush caches and TLBs. */
	wbinvd();
	invltlb();

	/* Enable MTRRs. */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_DEF_ENABLE);

	/* Restore caches and PGE. */
	load_cr0(cr0);
	load_cr4(cr4);

	critical_exit();
}

/*
 * Hunt for the fixed MTRR referencing (addr)
 */
static struct mem_range_desc *
amd64_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
	struct mem_range_desc *mrd;
	int i;

	for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K);
	    i++, mrd++)
		if ((addr >= mrd->mr_base) &&
		    (addr < (mrd->mr_base + mrd->mr_len)))
			return (mrd);
	return (NULL);
}

/*
 * Try to satisfy the given range request by manipulating the fixed
 * MTRRs that cover low memory.
 *
 * Note that we try to be generous here; we'll bloat the range out to
 * the next higher/lower boundary to avoid the consumer having to know
 * too much about the mechanisms here.
 *
 * XXX note that this will have to be updated when we start supporting
 * "busy" ranges.
 */
static int
amd64_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *first_md, *last_md, *curr_md;

	/* Range check. */
	if (((first_md = amd64_mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
	    ((last_md = amd64_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL))
		return (EINVAL);

	/* Check that we aren't doing something risky. */
	if (!(mrd->mr_flags & MDF_FORCE))
		for (curr_md = first_md; curr_md <= last_md; curr_md++) {
			if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
				return (EACCES);
		}

	/* Set flags, clear set-by-firmware flag. */
	for (curr_md = first_md; curr_md <= last_md; curr_md++) {
		curr_md->mr_flags = mrcopyflags(curr_md->mr_flags &
		    ~MDF_FIRMWARE, mrd->mr_flags);
		bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
	}

	return (0);
}

/*
 * Modify/add a variable MTRR to satisfy the request.
 *
 * XXX needs to be updated to properly support "busy" ranges.
 */
static int
amd64_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
    int *arg)
{
	struct mem_range_desc *curr_md, *free_md;
	int i;

	/*
	 * Scan the currently active variable descriptors, look for
	 * one we exactly match (straight takeover) and for possible
	 * accidental overlaps.
	 *
	 * Keep track of the first empty variable descriptor in case
	 * we can't perform a takeover.
	 */
	i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	curr_md = sc->mr_desc + i;
	free_md = NULL;
	for (; i < sc->mr_ndesc; i++, curr_md++) {
		if (curr_md->mr_flags & MDF_ACTIVE) {
			/* Exact match? */
			if ((curr_md->mr_base == mrd->mr_base) &&
			    (curr_md->mr_len == mrd->mr_len)) {

				/* Whoops, owned by someone. */
				if (curr_md->mr_flags & MDF_BUSY)
					return (EBUSY);

				/* Check that we aren't doing something risky */
				if (!(mrd->mr_flags & MDF_FORCE) &&
				    ((curr_md->mr_flags & MDF_ATTRMASK) ==
				    MDF_UNKNOWN))
					return (EACCES);

				/* Ok, just hijack this entry. */
				free_md = curr_md;
				break;
			}

			/* Non-exact overlap? */
			if (mroverlap(curr_md, mrd)) {
				/* Between conflicting region types? */
				if (amd64_mtrrconflict(curr_md->mr_flags,
				    mrd->mr_flags))
					return (EINVAL);
			}
		} else if (free_md == NULL) {
			free_md = curr_md;
		}
	}

	/* Got somewhere to put it? */
	if (free_md == NULL)
		return (ENOSPC);

	/* Set up new descriptor. */
	free_md->mr_base = mrd->mr_base;
	free_md->mr_len = mrd->mr_len;
	free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
	bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
	return (0);
}

/*
 * Handle requests to set memory range attributes by manipulating MTRRs.
 */
static int
amd64_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *targ;
	int error, i;

	switch (*arg) {
	case MEMRANGE_SET_UPDATE:
		/*
		 * Make sure that what's being asked for is even
		 * possible at all.
		 */
		if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
		    amd64_mtrrtype(mrd->mr_flags) == -1)
			return (EINVAL);

#define	FIXTOP	((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
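/*
 * With the standard counts (MTRR_N64K = 8, MTRR_N16K = 16,
 * MTRR_N4K = 64) FIXTOP is 0x100000: the fixed MTRRs cover exactly
 * the first 1MB of physical memory.
 */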

		/* Are the "low memory" conditions applicable? */
		if ((sc->mr_cap & MR686_FIXMTRR) &&
		    ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
			if ((error = amd64_mrsetlow(sc, mrd, arg)) != 0)
				return (error);
		} else {
			/* It's time to play with variable MTRRs. */
			if ((error = amd64_mrsetvariable(sc, mrd, arg)) != 0)
				return (error);
		}
		break;

	case MEMRANGE_SET_REMOVE:
		if ((targ = mem_range_match(sc, mrd)) == NULL)
			return (ENOENT);
		if (targ->mr_flags & MDF_FIXACTIVE)
			return (EPERM);
		if (targ->mr_flags & MDF_BUSY)
			return (EBUSY);
		targ->mr_flags &= ~MDF_ACTIVE;
		targ->mr_owner[0] = 0;
		break;

	default:
		return (EOPNOTSUPP);
	}

	/*
	 * Ensure that the direct map region does not contain any mappings
	 * that span MTRRs of different types.  However, the fixed MTRRs can
	 * be ignored, because a large page mapping the first 1 MB of physical
	 * memory is a special case that the processor handles.  The entire
	 * TLB will be invalidated by amd64_mrstore(), so pmap_demote_DMAP()
	 * needn't do it.
	 */
	i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	mrd = sc->mr_desc + i;
	for (; i < sc->mr_ndesc; i++, mrd++) {
		if ((mrd->mr_flags & (MDF_ACTIVE | MDF_BOGUS)) == MDF_ACTIVE)
			pmap_demote_DMAP(mrd->mr_base, mrd->mr_len, FALSE);
	}

	/* Update the hardware. */
	amd64_mrstore(sc);

	/* Refetch to see where we're at. */
	amd64_mrfetch(sc);
	return (0);
}

/*
 * Work out how many ranges we support, initialise storage for them,
 * and fetch the initial settings.
 */
static void
amd64_mrinit(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	u_int regs[4];
	int i, nmdesc = 0, pabits;

	mtrrcap = rdmsr(MSR_MTRRcap);
	mtrrdef = rdmsr(MSR_MTRRdefType);

	/* For now, bail out if MTRRs are not enabled. */
	if (!(mtrrdef & MTRR_DEF_ENABLE)) {
		if (bootverbose)
			printf("CPU supports MTRRs but they are not enabled\n");
		return;
	}
	nmdesc = mtrrcap & MTRR_CAP_VCNT;

	/*
	 * Determine the size of the PhysMask and PhysBase fields in
	 * the variable range MTRRs.  If the extended CPUID 0x80000008
	 * is present, use that to figure out how many physical
	 * address bits the CPU supports.  Otherwise, default to 36
	 * address bits.
	 */
	if (cpu_exthigh >= 0x80000008) {
		do_cpuid(0x80000008, regs);
		pabits = regs[0] & 0xff;
	} else
		pabits = 36;
	mtrr_physmask = ((1UL << pabits) - 1) & ~0xfffUL;
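	/*
	 * For example, 40 physical address bits give a mtrr_physmask
	 * of 0xfffffff000: bits 39..12 set, with the low 12 bits and
	 * anything above bit 39 masked off.
	 */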

	/* If fixed MTRRs supported and enabled. */
	if ((mtrrcap & MTRR_CAP_FIXED) && (mtrrdef & MTRR_DEF_FIXED_ENABLE)) {
		sc->mr_cap = MR686_FIXMTRR;
		nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
	}

	sc->mr_desc = malloc(nmdesc * sizeof(struct mem_range_desc), M_MEMDESC,
	    M_WAITOK | M_ZERO);
	sc->mr_ndesc = nmdesc;

	mrd = sc->mr_desc;

	/* Populate the fixed MTRR entries' base/length. */
	if (sc->mr_cap & MR686_FIXMTRR) {
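		/*
		 * Fixed-range layout: eight 64k entries cover
		 * 0x00000-0x7ffff, sixteen 16k entries cover
		 * 0x80000-0xbffff, and sixty-four 4k entries cover
		 * 0xc0000-0xfffff, i.e. the first 1MB.
		 */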
		for (i = 0; i < MTRR_N64K; i++, mrd++) {
			mrd->mr_base = i * 0x10000;
			mrd->mr_len = 0x10000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N16K; i++, mrd++) {
			mrd->mr_base = i * 0x4000 + 0x80000;
			mrd->mr_len = 0x4000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N4K; i++, mrd++) {
			mrd->mr_base = i * 0x1000 + 0xc0000;
			mrd->mr_len = 0x1000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
	}

	/*
	 * Get current settings, anything set now is considered to
	 * have been set by the firmware.  (XXX has something already
	 * played here?)
	 */
	amd64_mrfetch(sc);
	mrd = sc->mr_desc;
	for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
		if (mrd->mr_flags & MDF_ACTIVE)
			mrd->mr_flags |= MDF_FIRMWARE;
	}

	/*
	 * Ensure that the direct map region does not contain any mappings
	 * that span MTRRs of different types.  However, the fixed MTRRs can
	 * be ignored, because a large page mapping the first 1 MB of physical
	 * memory is a special case that the processor handles.  Invalidate
	 * any old TLB entries that might hold inconsistent memory type
	 * information.
	 */
	i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	mrd = sc->mr_desc + i;
	for (; i < sc->mr_ndesc; i++, mrd++) {
		if ((mrd->mr_flags & (MDF_ACTIVE | MDF_BOGUS)) == MDF_ACTIVE)
			pmap_demote_DMAP(mrd->mr_base, mrd->mr_len, TRUE);
	}
}

/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
 */
static void
amd64_mrAPinit(struct mem_range_softc *sc)
{

	amd64_mrstoreone(sc);
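	/* Restore the default-type MSR value saved by the BSP at init. */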
	wrmsr(MSR_MTRRdefType, mtrrdef);
}

/*
 * Re-initialise running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
amd64_mrreinit(struct mem_range_softc *sc)
{
#ifdef SMP
	/*
	 * We should use ipi_all_but_self() to call other CPUs into a
	 * locking gate, then call a target function to do this work.
	 * The "proper" solution involves a generalised locking gate
	 * implementation, not ready yet.
	 */
	smp_rendezvous(NULL, (void *)amd64_mrAPinit, NULL, sc);
#else
	disable_intr();				/* disable interrupts */
	amd64_mrAPinit(sc);
	enable_intr();
#endif
}

static void
amd64_mem_drvinit(void *unused)
{

	if (mtrrs_disabled)
		return;
	if (!(cpu_feature & CPUID_MTRR))
		return;
	if ((cpu_id & 0xf00) != 0x600 && (cpu_id & 0xf00) != 0xf00)
		return;
	switch (cpu_vendor_id) {
	case CPU_VENDOR_INTEL:
	case CPU_VENDOR_AMD:
	case CPU_VENDOR_CENTAUR:
		break;
	default:
		return;
	}
	mem_range_softc.mr_op = &amd64_mrops;
}
SYSINIT(amd64memdev, SI_SUB_DRIVERS, SI_ORDER_FIRST, amd64_mem_drvinit, NULL);