FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/i686_mem.c

/*-
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/5.0/sys/i386/i386/i686_mem.c 106842 2002-11-13 09:37:43Z mdodd $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>

/*
 * i686 memory range operations
 *
 * This code will probably be impenetrable without reference to the
 * Intel Pentium Pro documentation.
 */
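/*
 * These operations are reached through mem_range_attr_get()/set(),
 * typically via the MEMRANGE_GET/MEMRANGE_SET ioctls on /dev/mem;
 * see memcontrol(8).
 */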

static const char *mem_owner_bios = "BIOS";

#define MR686_FIXMTRR   (1<<0)

#define mrwithin(mr, a) \
    (((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
#define mroverlap(mra, mrb) \
    (mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))

#define mrvalid(base, len)                                              \
    ((!((base) & ((1 << 12) - 1))) &&   /* base is multiple of 4k */    \
     ((len) >= (1 << 12)) &&            /* length is >= 4k */           \
     powerof2((len)) &&                 /* ... and power of two */      \
     !((base) & ((len) - 1)))           /* range is not discontinuous */
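/*
 * e.g. base 0xd0000000 with len 0x100000 (1MB) is valid: the base is
 * 4K-aligned, the length is a power of two, and the base is aligned
 * to the length.
 */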

#define mrcopyflags(curr, new) (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))

static int                      mtrrs_disabled;
TUNABLE_INT("machdep.disable_mtrrs", &mtrrs_disabled);
SYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RD,
        &mtrrs_disabled, 0, "Disable i686 MTRRs.");
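/* The tunable can be set from the loader, e.g. machdep.disable_mtrrs=1 in /boot/loader.conf. */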

static void                     i686_mrinit(struct mem_range_softc *sc);
static int                      i686_mrset(struct mem_range_softc *sc,
                                           struct mem_range_desc *mrd,
                                           int *arg);
static void                     i686_mrAPinit(struct mem_range_softc *sc);

static struct mem_range_ops i686_mrops = {
    i686_mrinit,
    i686_mrset,
    i686_mrAPinit
};

/* XXX for AP startup hook */
static u_int64_t                mtrrcap, mtrrdef;

static struct mem_range_desc    *mem_range_match(struct mem_range_softc *sc,
                                                 struct mem_range_desc *mrd);
static void                     i686_mrfetch(struct mem_range_softc *sc);
static int                      i686_mtrrtype(int flags);
static int                      i686_mrt2mtrr(int flags, int oldval);
static int                      i686_mtrrconflict(int flag1, int flag2);
static void                     i686_mrstore(struct mem_range_softc *sc);
static void                     i686_mrstoreone(void *arg);
static struct mem_range_desc    *i686_mtrrfixsearch(struct mem_range_softc *sc,
                                                    u_int64_t addr);
static int                      i686_mrsetlow(struct mem_range_softc *sc,
                                              struct mem_range_desc *mrd,
                                              int *arg);
static int                      i686_mrsetvariable(struct mem_range_softc *sc,
                                                   struct mem_range_desc *mrd,
                                                   int *arg);

/* i686 MTRR type to memory range type conversion */
static int i686_mtrrtomrt[] = {
    MDF_UNCACHEABLE,
    MDF_WRITECOMBINE,
    MDF_UNKNOWN,
    MDF_UNKNOWN,
    MDF_WRITETHROUGH,
    MDF_WRITEPROTECT,
    MDF_WRITEBACK
};
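/* The index is the Intel MTRR type code: 0 = UC, 1 = WC, 4 = WT, 5 = WP, 6 = WB; 2 and 3 are reserved. */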

#define MTRRTOMRTLEN (sizeof(i686_mtrrtomrt) / sizeof(i686_mtrrtomrt[0]))

static int
i686_mtrr2mrt(int val)
{
    if (val < 0 || val >= MTRRTOMRTLEN)
        return MDF_UNKNOWN;
    return i686_mtrrtomrt[val];
}

/*
 * i686 MTRR conflicts.  Writeback and uncacheable may overlap.
 */
static int
i686_mtrrconflict(int flag1, int flag2)
{
    flag1 &= MDF_ATTRMASK;
    flag2 &= MDF_ATTRMASK;
    if (flag1 == flag2 ||
        (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
        (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
        return 0;
    return 1;
}

/*
 * Look for an exactly-matching range.
 */
static struct mem_range_desc *
mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
{
    struct mem_range_desc       *cand;
    int                         i;

    for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
        if ((cand->mr_base == mrd->mr_base) &&
            (cand->mr_len == mrd->mr_len))
            return(cand);
    return(NULL);
}

/*
 * Fetch the current MTRR settings from the current CPU (assumed to all
 * be in sync in the SMP case).  Note that if we are here, we assume
 * that MTRRs are enabled, and we may or may not have fixed MTRRs.
 */
static void
i686_mrfetch(struct mem_range_softc *sc)
{
    struct mem_range_desc       *mrd;
    u_int64_t                   msrv;
    int                         i, j, msr;

    mrd = sc->mr_desc;

    /* Get fixed-range MTRRs */
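    /* Each fixed-range MSR packs eight 8-bit type fields, one per consecutive sub-range. */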
    if (sc->mr_cap & MR686_FIXMTRR) {
        msr = MSR_MTRR64kBase;
        for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
            msrv = rdmsr(msr);
            for (j = 0; j < 8; j++, mrd++) {
                mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
                    i686_mtrr2mrt(msrv & 0xff) |
                    MDF_ACTIVE;
                if (mrd->mr_owner[0] == 0)
                    strcpy(mrd->mr_owner, mem_owner_bios);
                msrv = msrv >> 8;
            }
        }
        msr = MSR_MTRR16kBase;
        for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
            msrv = rdmsr(msr);
            for (j = 0; j < 8; j++, mrd++) {
                mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
                    i686_mtrr2mrt(msrv & 0xff) |
                    MDF_ACTIVE;
                if (mrd->mr_owner[0] == 0)
                    strcpy(mrd->mr_owner, mem_owner_bios);
                msrv = msrv >> 8;
            }
        }
        msr = MSR_MTRR4kBase;
        for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
            msrv = rdmsr(msr);
            for (j = 0; j < 8; j++, mrd++) {
                mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
                    i686_mtrr2mrt(msrv & 0xff) |
                    MDF_ACTIVE;
                if (mrd->mr_owner[0] == 0)
                    strcpy(mrd->mr_owner, mem_owner_bios);
                msrv = msrv >> 8;
            }
        }
    }

    /* Get remainder which must be variable MTRRs */
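    /* Each variable-range MTRR is a PHYSBASE/PHYSMASK MSR pair, so msr advances by two per range. */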
    msr = MSR_MTRRVarBase;
    for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
        msrv = rdmsr(msr);
        mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
            i686_mtrr2mrt(msrv & 0xff);
        mrd->mr_base = msrv & 0x0000000ffffff000LL;
        msrv = rdmsr(msr + 1);
        mrd->mr_flags = (msrv & 0x800) ?
            (mrd->mr_flags | MDF_ACTIVE) :
            (mrd->mr_flags & ~MDF_ACTIVE);
        /* Compute the range from the mask. Ick. */
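        /* e.g. a mask of 0xff0000000 yields (~0xff0000000 & 0xfffffffff) + 1 = 0x10000000, a 256MB range. */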
        mrd->mr_len = (~(msrv & 0x0000000ffffff000LL) & 0x0000000fffffffffLL) + 1;
        if (!mrvalid(mrd->mr_base, mrd->mr_len))
            mrd->mr_flags |= MDF_BOGUS;
        /* If unclaimed and active, must be the BIOS */
        if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
            strcpy(mrd->mr_owner, mem_owner_bios);
    }
}

/*
 * Return the MTRR memory type matching a region's flags
 */
static int
i686_mtrrtype(int flags)
{
    int         i;

    flags &= MDF_ATTRMASK;

    for (i = 0; i < MTRRTOMRTLEN; i++) {
        if (i686_mtrrtomrt[i] == MDF_UNKNOWN)
            continue;
        if (flags == i686_mtrrtomrt[i])
            return(i);
    }
    return(-1);
}

static int
i686_mrt2mtrr(int flags, int oldval)
{
    int val;

    if ((val = i686_mtrrtype(flags)) == -1)
        return oldval & 0xff;
    return val & 0xff;
}

/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
i686_mrstore(struct mem_range_softc *sc)
{
#ifdef SMP
    /*
     * We should use ipi_all_but_self() to call other CPUs into a
     * locking gate, then call a target function to do this work.
     * The "proper" solution involves a generalised locking gate
     * implementation, not ready yet.
     */
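    /*
     * smp_rendezvous() runs i686_mrstoreone() on every CPU at once,
     * each with interrupts disabled, so the MTRRs stay consistent
     * system-wide.
     */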
    smp_rendezvous(NULL, i686_mrstoreone, NULL, (void *)sc);
#else
    disable_intr();                             /* disable interrupts */
    i686_mrstoreone((void *)sc);
    enable_intr();
#endif
}

/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than
 * just stuffing one entry; this is simpler (but slower, of course).
 */
static void
i686_mrstoreone(void *arg)
{
    struct mem_range_softc      *sc = (struct mem_range_softc *)arg;
    struct mem_range_desc       *mrd;
    u_int64_t                   omsrv, msrv;
    int                         i, j, msr;
    u_int                       cr4save;

    mrd = sc->mr_desc;

    cr4save = rcr4();                           /* save cr4 */
    if (cr4save & CR4_PGE)
        load_cr4(cr4save & ~CR4_PGE);
    load_cr0((rcr0() & ~CR0_NW) | CR0_CD);      /* disable caches (CD = 1, NW = 0) */
    wbinvd();                                   /* flush caches, TLBs */
    wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~0x800);    /* disable MTRRs (E = 0) */
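    /*
     * The sequence here follows the procedure Intel documents for
     * changing MTRRs: disable the caches and global pages, flush,
     * disable the MTRRs, rewrite them, then flush again and restore
     * the original state.
     */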

    /* Set fixed-range MTRRs */
    if (sc->mr_cap & MR686_FIXMTRR) {
        msr = MSR_MTRR64kBase;
        for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
            msrv = 0;
            omsrv = rdmsr(msr);
            for (j = 7; j >= 0; j--) {
                msrv = msrv << 8;
                msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8));
            }
            wrmsr(msr, msrv);
            mrd += 8;
        }
        msr = MSR_MTRR16kBase;
        for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
            msrv = 0;
            omsrv = rdmsr(msr);
            for (j = 7; j >= 0; j--) {
                msrv = msrv << 8;
                msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8));
            }
            wrmsr(msr, msrv);
            mrd += 8;
        }
        msr = MSR_MTRR4kBase;
        for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
            msrv = 0;
            omsrv = rdmsr(msr);
            for (j = 7; j >= 0; j--) {
                msrv = msrv << 8;
                msrv |= i686_mrt2mtrr((mrd + j)->mr_flags, omsrv >> (j*8));
            }
            wrmsr(msr, msrv);
            mrd += 8;
        }
    }

    /* Set remainder which must be variable MTRRs */
    msr = MSR_MTRRVarBase;
    for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
        /* base/type register */
        omsrv = rdmsr(msr);
        if (mrd->mr_flags & MDF_ACTIVE) {
            msrv = mrd->mr_base & 0x0000000ffffff000LL;
            msrv |= i686_mrt2mtrr(mrd->mr_flags, omsrv);
        } else {
            msrv = 0;
        }
        wrmsr(msr, msrv);

        /* mask/active register */
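        /*
         * The mask is ~(mr_len - 1) restricted to the physical address
         * field, which is correct because mrvalid() guarantees mr_len
         * is a power of two; bit 11 (0x800) is the valid bit.
         */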
        if (mrd->mr_flags & MDF_ACTIVE) {
            msrv = 0x800 | (~(mrd->mr_len - 1) & 0x0000000ffffff000LL);
        } else {
            msrv = 0;
        }
        wrmsr(msr + 1, msrv);
    }
    wbinvd();                                                   /* flush caches, TLBs */
    wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | 0x800);     /* re-enable MTRRs (E = 1) */
    load_cr0(rcr0() & ~(CR0_CD | CR0_NW));                      /* enable caches (CD = 0, NW = 0) */
    load_cr4(cr4save);                                          /* restore cr4 */
}

/*
 * Hunt for the fixed MTRR referencing (addr)
 */
static struct mem_range_desc *
i686_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
    struct mem_range_desc *mrd;
    int                 i;

    for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K); i++, mrd++)
        if ((addr >= mrd->mr_base) && (addr < (mrd->mr_base + mrd->mr_len)))
            return(mrd);
    return(NULL);
}

/*
 * Try to satisfy the given range request by manipulating the fixed MTRRs that
 * cover low memory.
 *
 * Note that we try to be generous here; we'll bloat the range out to the
 * next higher/lower boundary to avoid the consumer having to know too much
 * about the mechanisms here.
 *
 * XXX note that this will have to be updated when we start supporting "busy" ranges.
 */
static int
i686_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
    struct mem_range_desc       *first_md, *last_md, *curr_md;

    /* range check */
    if (((first_md = i686_mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
        ((last_md = i686_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL))
        return(EINVAL);

    /* check we aren't doing something risky */
    if (!(mrd->mr_flags & MDF_FORCE))
        for (curr_md = first_md; curr_md <= last_md; curr_md++) {
            if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
                return (EACCES);
        }

    /* set flags, clear set-by-firmware flag */
    for (curr_md = first_md; curr_md <= last_md; curr_md++) {
        curr_md->mr_flags = mrcopyflags(curr_md->mr_flags & ~MDF_FIRMWARE, mrd->mr_flags);
        bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
    }

    return(0);
}

/*
 * Modify/add a variable MTRR to satisfy the request.
 *
 * XXX needs to be updated to properly support "busy" ranges.
 */
static int
i686_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
    struct mem_range_desc       *curr_md, *free_md;
    int                         i;

    /*
     * Scan the currently active variable descriptors, look for
     * one we exactly match (straight takeover) and for possible
     * accidental overlaps.
     * Keep track of the first empty variable descriptor in case we
     * can't perform a takeover.
     */
    i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
    curr_md = sc->mr_desc + i;
    free_md = NULL;
    for (; i < sc->mr_ndesc; i++, curr_md++) {
        if (curr_md->mr_flags & MDF_ACTIVE) {
            /* exact match? */
            if ((curr_md->mr_base == mrd->mr_base) &&
                (curr_md->mr_len == mrd->mr_len)) {
                /* whoops, owned by someone */
                if (curr_md->mr_flags & MDF_BUSY)
                    return(EBUSY);
                /* check we aren't doing something risky */
                if (!(mrd->mr_flags & MDF_FORCE) &&
                  ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN))
                    return (EACCES);
                /* Ok, just hijack this entry */
                free_md = curr_md;
                break;
            }
            /* non-exact overlap? */
            if (mroverlap(curr_md, mrd)) {
                /* between conflicting region types? */
                if (i686_mtrrconflict(curr_md->mr_flags, mrd->mr_flags))
                    return(EINVAL);
            }
        } else if (free_md == NULL) {
            free_md = curr_md;
        }
    }
    /* got somewhere to put it? */
    if (free_md == NULL)
        return(ENOSPC);

    /* Set up new descriptor */
    free_md->mr_base = mrd->mr_base;
    free_md->mr_len = mrd->mr_len;
    free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
    bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
    return(0);
}

/*
 * Handle requests to set memory range attributes by manipulating MTRRs.
 */
static int
i686_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
    struct mem_range_desc       *targ;
    int                         error = 0;

    switch(*arg) {
    case MEMRANGE_SET_UPDATE:
        /* make sure that what's being asked for is even possible at all */
        if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
            i686_mtrrtype(mrd->mr_flags) == -1)
            return(EINVAL);

#define FIXTOP  ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
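/* With MTRR_N64K = 8, MTRR_N16K = 16 and MTRR_N4K = 64, FIXTOP works out to 0x100000: the fixed ranges cover exactly the first 1MB. */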

        /* are the "low memory" conditions applicable? */
        if ((sc->mr_cap & MR686_FIXMTRR) &&
            ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
            if ((error = i686_mrsetlow(sc, mrd, arg)) != 0)
                return(error);
        } else {
            /* it's time to play with variable MTRRs */
            if ((error = i686_mrsetvariable(sc, mrd, arg)) != 0)
                return(error);
        }
        break;

    case MEMRANGE_SET_REMOVE:
        if ((targ = mem_range_match(sc, mrd)) == NULL)
            return(ENOENT);
        if (targ->mr_flags & MDF_FIXACTIVE)
            return(EPERM);
        if (targ->mr_flags & MDF_BUSY)
            return(EBUSY);
        targ->mr_flags &= ~MDF_ACTIVE;
        targ->mr_owner[0] = 0;
        break;

    default:
        return(EOPNOTSUPP);
    }

    /* update the hardware */
    i686_mrstore(sc);
    i686_mrfetch(sc);   /* refetch to see where we're at */
    return(0);
}

/*
 * Work out how many ranges we support, initialise storage for them,
 * fetch the initial settings.
 */
static void
i686_mrinit(struct mem_range_softc *sc)
{
    struct mem_range_desc       *mrd;
    int                         nmdesc = 0;
    int                         i;

    mtrrcap = rdmsr(MSR_MTRRcap);
    mtrrdef = rdmsr(MSR_MTRRdefType);

    /* For now, bail out if MTRRs are not enabled */
    if (!(mtrrdef & 0x800)) {
        if (bootverbose)
            printf("CPU supports MTRRs but they are not enabled\n");
        return;
    }
    nmdesc = mtrrcap & 0xff;
    printf("Pentium Pro MTRR support enabled\n");

    /* If fixed MTRRs are supported and enabled */
    if ((mtrrcap & 0x100) && (mtrrdef & 0x400)) {
        sc->mr_cap = MR686_FIXMTRR;
        nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
    }

    sc->mr_desc =
        (struct mem_range_desc *)malloc(nmdesc * sizeof(struct mem_range_desc),
                                        M_MEMDESC, M_WAITOK | M_ZERO);
    sc->mr_ndesc = nmdesc;

    mrd = sc->mr_desc;

    /* Populate the fixed MTRR entries' base/length */
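    /*
     * The fixed-range layout is architectural: eight 64K ranges cover
     * 0-0x7ffff, sixteen 16K ranges cover 0x80000-0xbffff, and
     * sixty-four 4K ranges cover 0xc0000-0xfffff.
     */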
    if (sc->mr_cap & MR686_FIXMTRR) {
        for (i = 0; i < MTRR_N64K; i++, mrd++) {
            mrd->mr_base = i * 0x10000;
            mrd->mr_len = 0x10000;
            mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
        }
        for (i = 0; i < MTRR_N16K; i++, mrd++) {
            mrd->mr_base = i * 0x4000 + 0x80000;
            mrd->mr_len = 0x4000;
            mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
        }
        for (i = 0; i < MTRR_N4K; i++, mrd++) {
            mrd->mr_base = i * 0x1000 + 0xc0000;
            mrd->mr_len = 0x1000;
            mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
        }
    }

    /*
     * Get current settings; anything set now is considered to have
     * been set by the firmware. (XXX has something already played here?)
     */
    i686_mrfetch(sc);
    mrd = sc->mr_desc;
    for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
        if (mrd->mr_flags & MDF_ACTIVE)
            mrd->mr_flags |= MDF_FIRMWARE;
    }
}

/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
 */
static void
i686_mrAPinit(struct mem_range_softc *sc)
{
    i686_mrstoreone((void *)sc);        /* set MTRRs to match BSP */
    wrmsr(MSR_MTRRdefType, mtrrdef);    /* set MTRR behaviour to match BSP */
}

static void
i686_mem_drvinit(void *unused)
{
    /* Try for i686 MTRRs */
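    /* cpu_id bits 8-11 hold the CPU family; this accepts family 6 (P6, Athlon) and family 0xf (Pentium 4, K8) parts that advertise the MTRR feature. */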
    if (!mtrrs_disabled && (cpu_feature & CPUID_MTRR) &&
        ((cpu_id & 0xf00) == 0x600 || (cpu_id & 0xf00) == 0xf00) &&
        ((strcmp(cpu_vendor, "GenuineIntel") == 0) ||
        (strcmp(cpu_vendor, "AuthenticAMD") == 0))) {
        mem_range_softc.mr_op = &i686_mrops;
    }
}

SYSINIT(i686memdev, SI_SUB_DRIVERS, SI_ORDER_FIRST, i686_mem_drvinit, NULL)