/*-
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.1/sys/amd64/amd64/amd64_mem.c 206165 2010-04-04 15:42:52Z rnoland $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

/*
 * amd64 memory range operations
 *
 * This code will probably be impenetrable without reference to the
 * Intel Pentium Pro documentation or the AMD64 Architecture
 * Programmer's Manual, Volume 2.
 */

static char *mem_owner_bios = "BIOS";

#define	MR686_FIXMTRR	(1<<0)

#define	mrwithin(mr, a)							\
	(((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
#define	mroverlap(mra, mrb)						\
	(mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))

#define	mrvalid(base, len)						\
	((!(base & ((1 << 12) - 1))) &&	/* base is multiple of 4k */	\
	    ((len) >= (1 << 12)) &&	/* length is >= 4k */		\
	    powerof2((len)) &&		/* ... and power of two */	\
	    !((base) & ((len) - 1)))	/* range is not discontinuous */
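
/*
 * For example, mrvalid(0x100000, 0x40000) holds: the base is 4k-aligned,
 * the 256k length is a power of two, and the base is aligned to the
 * length.  mrvalid(0x101000, 0x40000) fails the last test, since
 * 0x101000 & 0x3ffff != 0.
 */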

#define	mrcopyflags(curr, new)						\
	(((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))

static int mtrrs_disabled;
TUNABLE_INT("machdep.disable_mtrrs", &mtrrs_disabled);
SYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RDTUN,
    &mtrrs_disabled, 0, "Disable amd64 MTRRs.");

static void	amd64_mrinit(struct mem_range_softc *sc);
static int	amd64_mrset(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);
static void	amd64_mrAPinit(struct mem_range_softc *sc);
static void	amd64_mrreinit(struct mem_range_softc *sc);

static struct mem_range_ops amd64_mrops = {
	amd64_mrinit,
	amd64_mrset,
	amd64_mrAPinit,
	amd64_mrreinit
};

/* XXX for AP startup hook */
static u_int64_t mtrrcap, mtrrdef;

/* The bitmask for the PhysBase and PhysMask fields of the variable MTRRs. */
static u_int64_t mtrr_physmask;

static struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd);
static void	amd64_mrfetch(struct mem_range_softc *sc);
static int	amd64_mtrrtype(int flags);
static int	amd64_mrt2mtrr(int flags, int oldval);
static int	amd64_mtrrconflict(int flag1, int flag2);
static void	amd64_mrstore(struct mem_range_softc *sc);
static void	amd64_mrstoreone(void *arg);
static struct mem_range_desc *amd64_mtrrfixsearch(struct mem_range_softc *sc,
		    u_int64_t addr);
static int	amd64_mrsetlow(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);
static int	amd64_mrsetvariable(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);

/* amd64 MTRR type to memory range type conversion, indexed by MTRR type. */
static int amd64_mtrrtomrt[] = {
	MDF_UNCACHEABLE,	/* 0: UC, uncacheable */
	MDF_WRITECOMBINE,	/* 1: WC, write-combining */
	MDF_UNKNOWN,		/* 2: reserved */
	MDF_UNKNOWN,		/* 3: reserved */
	MDF_WRITETHROUGH,	/* 4: WT, write-through */
	MDF_WRITEPROTECT,	/* 5: WP, write-protected */
	MDF_WRITEBACK		/* 6: WB, write-back */
};

#define	MTRRTOMRTLEN	(sizeof(amd64_mtrrtomrt) / sizeof(amd64_mtrrtomrt[0]))

static int
amd64_mtrr2mrt(int val)
{

	if (val < 0 || val >= MTRRTOMRTLEN)
		return (MDF_UNKNOWN);
	return (amd64_mtrrtomrt[val]);
}

/*
 * amd64 MTRR conflicts.  Writeback and uncacheable may overlap, since
 * the architecture defines the effective type of a WB/UC overlap to be
 * uncacheable, which is always safe.
 */
static int
amd64_mtrrconflict(int flag1, int flag2)
{

	flag1 &= MDF_ATTRMASK;
	flag2 &= MDF_ATTRMASK;
	if ((flag1 & MDF_UNKNOWN) || (flag2 & MDF_UNKNOWN))
		return (1);
	if (flag1 == flag2 ||
	    (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
	    (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
		return (0);
	return (1);
}

/*
 * Look for an exactly-matching range.
 */
static struct mem_range_desc *
mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
{
	struct mem_range_desc *cand;
	int i;

	for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
		if ((cand->mr_base == mrd->mr_base) &&
		    (cand->mr_len == mrd->mr_len))
			return (cand);
	return (NULL);
}

/*
 * Fetch the current MTRR settings from the current CPU (assumed to
 * all be in sync in the SMP case).  Note that if we are here, we
 * assume that MTRRs are enabled; we may or may not have fixed MTRRs.
 */
static void
amd64_mrfetch(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	u_int64_t msrv;
	int i, j, msr;

	mrd = sc->mr_desc;

	/* Get fixed-range MTRRs. */
	if (sc->mr_cap & MR686_FIXMTRR) {
		msr = MSR_MTRR64kBase;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
	}

	/* Get the remainder, which must be variable MTRRs. */
	msr = MSR_MTRRVarBase;
	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
		msrv = rdmsr(msr);
		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
		    amd64_mtrr2mrt(msrv & MTRR_PHYSBASE_TYPE);
		mrd->mr_base = msrv & mtrr_physmask;
		msrv = rdmsr(msr + 1);
		mrd->mr_flags = (msrv & MTRR_PHYSMASK_VALID) ?
		    (mrd->mr_flags | MDF_ACTIVE) :
		    (mrd->mr_flags & ~MDF_ACTIVE);

		/* Compute the range from the mask.  Ick. */
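		/*
		 * For a 2^n-byte range the mask has bits [pabits-1:n] set
		 * and bits [n-1:12] clear, so inverting it within
		 * (mtrr_physmask | 0xfff) and adding one recovers the
		 * length.  E.g. with pabits = 36, a 128MB range has mask
		 * 0xff8000000; ~0xff8000000 & 0xfffffffff is 0x7ffffff,
		 * and adding one yields 0x8000000 (128MB).
		 */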
		mrd->mr_len = (~(msrv & mtrr_physmask) &
		    (mtrr_physmask | 0xfffL)) + 1;
		if (!mrvalid(mrd->mr_base, mrd->mr_len))
			mrd->mr_flags |= MDF_BOGUS;

		/* If unclaimed and active, must be the BIOS. */
		if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
			strcpy(mrd->mr_owner, mem_owner_bios);
	}
}

/*
 * Return the MTRR memory type matching a region's flags.
 */
static int
amd64_mtrrtype(int flags)
{
	int i;

	flags &= MDF_ATTRMASK;

	for (i = 0; i < MTRRTOMRTLEN; i++) {
		if (amd64_mtrrtomrt[i] == MDF_UNKNOWN)
			continue;
		if (flags == amd64_mtrrtomrt[i])
			return (i);
	}
	return (-1);
}

static int
amd64_mrt2mtrr(int flags, int oldval)
{
	int val;

	if ((val = amd64_mtrrtype(flags)) == -1)
		return (oldval & 0xff);
	return (val & 0xff);
}

/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
amd64_mrstore(struct mem_range_softc *sc)
{
#ifdef SMP
	/*
	 * We should use ipi_all_but_self() to call other CPUs into a
	 * locking gate, then call a target function to do this work.
	 * The "proper" solution involves a generalised locking gate
	 * implementation, not ready yet.
	 */
	smp_rendezvous(NULL, amd64_mrstoreone, NULL, sc);
#else
	disable_intr();				/* disable interrupts */
	amd64_mrstoreone(sc);
	enable_intr();
#endif
}

/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than just
 * stuffing one entry; this is simpler (but slower, of course).
 */
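/*
 * The store sequence below (disable caches, flush, disable MTRRs,
 * rewrite them, flush again, re-enable) follows the MTRR update
 * procedure documented in the processor vendor manuals.
 */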
static void
amd64_mrstoreone(void *arg)
{
	struct mem_range_softc *sc = arg;
	struct mem_range_desc *mrd;
	u_int64_t omsrv, msrv;
	int i, j, msr;
	u_int cr4save;

	mrd = sc->mr_desc;

	/* Disable PGE. */
	cr4save = rcr4();
	if (cr4save & CR4_PGE)
		load_cr4(cr4save & ~CR4_PGE);

	/* Disable caches (CD = 1, NW = 0). */
	load_cr0((rcr0() & ~CR0_NW) | CR0_CD);

	/* Flush caches and TLBs. */
	wbinvd();

	/* Disable MTRRs (E = 0). */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_DEF_ENABLE);

	/* Set fixed-range MTRRs. */
	if (sc->mr_cap & MR686_FIXMTRR) {
		msr = MSR_MTRR64kBase;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
	}

	/* Set the remainder, which must be variable MTRRs. */
	msr = MSR_MTRRVarBase;
	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
		/* base/type register */
		omsrv = rdmsr(msr);
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = mrd->mr_base & mtrr_physmask;
			msrv |= amd64_mrt2mtrr(mrd->mr_flags, omsrv);
		} else {
			msrv = 0;
		}
		wrmsr(msr, msrv);

		/* mask/active register */
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = MTRR_PHYSMASK_VALID |
			    (~(mrd->mr_len - 1) & mtrr_physmask);
		} else {
			msrv = 0;
		}
		wrmsr(msr + 1, msrv);
	}

	/* Flush caches and TLBs again. */
	wbinvd();

	/* Re-enable MTRRs (E = 1). */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_DEF_ENABLE);

	/* Re-enable caches (CD = 0, NW = 0). */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));

	/* Restore PGE. */
	load_cr4(cr4save);
}

/*
 * Hunt for the fixed MTRR referencing (addr).
 */
static struct mem_range_desc *
amd64_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
	struct mem_range_desc *mrd;
	int i;

	for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K);
	     i++, mrd++)
		if ((addr >= mrd->mr_base) &&
		    (addr < (mrd->mr_base + mrd->mr_len)))
			return (mrd);
	return (NULL);
}

/*
 * Try to satisfy the given range request by manipulating the fixed
 * MTRRs that cover low memory.
 *
 * Note that we try to be generous here; we'll bloat the range out to
 * the next higher/lower boundary to avoid the consumer having to know
 * too much about the mechanisms here.
 *
 * XXX note that this will have to be updated when we start supporting
 * "busy" ranges.
 */
static int
amd64_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *first_md, *last_md, *curr_md;

	/* Range check. */
	if (((first_md = amd64_mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
	    ((last_md = amd64_mtrrfixsearch(sc,
	    mrd->mr_base + mrd->mr_len - 1)) == NULL))
		return (EINVAL);

	/* Check that we aren't doing something risky. */
	if (!(mrd->mr_flags & MDF_FORCE))
		for (curr_md = first_md; curr_md <= last_md; curr_md++) {
			if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
				return (EACCES);
		}

	/* Set flags, clear set-by-firmware flag. */
	for (curr_md = first_md; curr_md <= last_md; curr_md++) {
		curr_md->mr_flags = mrcopyflags(curr_md->mr_flags &
		    ~MDF_FIRMWARE, mrd->mr_flags);
		bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
	}

	return (0);
}

/*
 * Modify/add a variable MTRR to satisfy the request.
 *
 * XXX needs to be updated to properly support "busy" ranges.
 */
static int
amd64_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
    int *arg)
{
	struct mem_range_desc *curr_md, *free_md;
	int i;

	/*
	 * Scan the currently active variable descriptors, look for
	 * one we exactly match (straight takeover) and for possible
	 * accidental overlaps.
	 *
	 * Keep track of the first empty variable descriptor in case
	 * we can't perform a takeover.
	 */
	i = (sc->mr_cap & MR686_FIXMTRR) ?	/* skip fixed descriptors */
	    MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	curr_md = sc->mr_desc + i;
	free_md = NULL;
	for (; i < sc->mr_ndesc; i++, curr_md++) {
		if (curr_md->mr_flags & MDF_ACTIVE) {
			/* Exact match? */
			if ((curr_md->mr_base == mrd->mr_base) &&
			    (curr_md->mr_len == mrd->mr_len)) {

				/* Whoops, owned by someone. */
				if (curr_md->mr_flags & MDF_BUSY)
					return (EBUSY);

				/* Check that we aren't doing something risky. */
				if (!(mrd->mr_flags & MDF_FORCE) &&
				    ((curr_md->mr_flags & MDF_ATTRMASK) ==
				    MDF_UNKNOWN))
					return (EACCES);

				/* Ok, just hijack this entry. */
				free_md = curr_md;
				break;
			}

			/* Non-exact overlap? */
			if (mroverlap(curr_md, mrd)) {
				/* Between conflicting region types? */
				if (amd64_mtrrconflict(curr_md->mr_flags,
				    mrd->mr_flags))
					return (EINVAL);
			}
		} else if (free_md == NULL) {
			free_md = curr_md;
		}
	}

	/* Got somewhere to put it? */
	if (free_md == NULL)
		return (ENOSPC);

	/* Set up new descriptor. */
	free_md->mr_base = mrd->mr_base;
	free_md->mr_len = mrd->mr_len;
	free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
	bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
	return (0);
}

/*
 * Handle requests to set memory range attributes by manipulating MTRRs.
 */
static int
amd64_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *targ;
	int error = 0;

	switch (*arg) {
	case MEMRANGE_SET_UPDATE:
		/*
		 * Make sure that what's being asked for is even
		 * possible at all.
		 */
		if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
		    amd64_mtrrtype(mrd->mr_flags) == -1)
			return (EINVAL);

#define	FIXTOP	((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
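	/* FIXTOP is the top of the fixed-range area: 512k + 256k + 256k = 1MB. */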

		/* Are the "low memory" conditions applicable? */
		if ((sc->mr_cap & MR686_FIXMTRR) &&
		    ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
			if ((error = amd64_mrsetlow(sc, mrd, arg)) != 0)
				return (error);
		} else {
			/* It's time to play with variable MTRRs. */
			if ((error = amd64_mrsetvariable(sc, mrd, arg)) != 0)
				return (error);
		}
		break;

	case MEMRANGE_SET_REMOVE:
		if ((targ = mem_range_match(sc, mrd)) == NULL)
			return (ENOENT);
		if (targ->mr_flags & MDF_FIXACTIVE)
			return (EPERM);
		if (targ->mr_flags & MDF_BUSY)
			return (EBUSY);
		targ->mr_flags &= ~MDF_ACTIVE;
		targ->mr_owner[0] = 0;
		break;

	default:
		return (EOPNOTSUPP);
	}

	/* Update the hardware. */
	amd64_mrstore(sc);

	/* Refetch to see where we're at. */
	amd64_mrfetch(sc);
	return (0);
}

/*
 * Work out how many ranges we support, initialise storage for them,
 * and fetch the initial settings.
 */
static void
amd64_mrinit(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	u_int regs[4];
	int i, nmdesc = 0, pabits;

	mtrrcap = rdmsr(MSR_MTRRcap);
	mtrrdef = rdmsr(MSR_MTRRdefType);

	/* For now, bail out if MTRRs are not enabled. */
	if (!(mtrrdef & MTRR_DEF_ENABLE)) {
		if (bootverbose)
			printf("CPU supports MTRRs but they are not enabled\n");
		return;
	}
	nmdesc = mtrrcap & MTRR_CAP_VCNT;	/* number of variable ranges */

	/*
	 * Determine the size of the PhysMask and PhysBase fields in
	 * the variable range MTRRs.  If the extended CPUID leaf
	 * 0x80000008 is present, use it to find out how many physical
	 * address bits the CPU supports.  Otherwise, default to 36
	 * address bits.
	 */
	if (cpu_exthigh >= 0x80000008) {
		do_cpuid(0x80000008, regs);
		pabits = regs[0] & 0xff;
	} else
		pabits = 36;
	mtrr_physmask = ((1UL << pabits) - 1) & ~0xfffUL;
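	/* e.g. pabits = 36 gives mtrr_physmask = 0xffffff000. */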

	/* If fixed MTRRs are supported and enabled. */
	if ((mtrrcap & MTRR_CAP_FIXED) && (mtrrdef & MTRR_DEF_FIXED_ENABLE)) {
		sc->mr_cap = MR686_FIXMTRR;
		nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
	}

	sc->mr_desc = malloc(nmdesc * sizeof(struct mem_range_desc), M_MEMDESC,
	    M_WAITOK | M_ZERO);
	sc->mr_ndesc = nmdesc;

	mrd = sc->mr_desc;

	/* Populate the fixed MTRR entries' base/length. */
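	/*
	 * The fixed ranges cover the low 1MB: eight 64k ranges for
	 * 0-0x7ffff, sixteen 16k ranges for 0x80000-0xbffff and
	 * sixty-four 4k ranges for 0xc0000-0xfffff.
	 */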
	if (sc->mr_cap & MR686_FIXMTRR) {
		for (i = 0; i < MTRR_N64K; i++, mrd++) {
			mrd->mr_base = i * 0x10000;
			mrd->mr_len = 0x10000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N16K; i++, mrd++) {
			mrd->mr_base = i * 0x4000 + 0x80000;
			mrd->mr_len = 0x4000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N4K; i++, mrd++) {
			mrd->mr_base = i * 0x1000 + 0xc0000;
			mrd->mr_len = 0x1000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
	}

	/*
	 * Get current settings; anything set now is considered to
	 * have been set by the firmware.  (XXX has something already
	 * played here?)
	 */
	amd64_mrfetch(sc);
	mrd = sc->mr_desc;
	for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
		if (mrd->mr_flags & MDF_ACTIVE)
			mrd->mr_flags |= MDF_FIRMWARE;
	}
}

/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
 */
static void
amd64_mrAPinit(struct mem_range_softc *sc)
{

	amd64_mrstoreone(sc);
	/* Restore the default memory type and enable bits saved on the BSP. */
	wrmsr(MSR_MTRRdefType, mtrrdef);
}

/*
 * Re-initialise running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
amd64_mrreinit(struct mem_range_softc *sc)
{
#ifdef SMP
	/*
	 * We should use ipi_all_but_self() to call other CPUs into a
	 * locking gate, then call a target function to do this work.
	 * The "proper" solution involves a generalised locking gate
	 * implementation, not ready yet.
	 */
	smp_rendezvous(NULL, (void *)amd64_mrAPinit, NULL, sc);
#else
	disable_intr();				/* disable interrupts */
	amd64_mrAPinit(sc);
	enable_intr();
#endif
}

static void
amd64_mem_drvinit(void *unused)
{

	if (mtrrs_disabled)
		return;
	if (!(cpu_feature & CPUID_MTRR))
		return;
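	/* Restrict to CPU families 6 (P6-class) and 0xf, the layouts handled here. */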
	if ((cpu_id & 0xf00) != 0x600 && (cpu_id & 0xf00) != 0xf00)
		return;
	switch (cpu_vendor_id) {
	case CPU_VENDOR_INTEL:
	case CPU_VENDOR_AMD:
	case CPU_VENDOR_CENTAUR:
		break;
	default:
		return;
	}
	mem_range_softc.mr_op = &amd64_mrops;
}
SYSINIT(amd64memdev, SI_SUB_DRIVERS, SI_ORDER_FIRST, amd64_mem_drvinit, NULL);