The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/pc/memory.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*
    2  * Size memory and create the kernel page-tables on the fly while doing so.
    3  * Called from main(), this code should only be run by the bootstrap processor.
    4  *
    5  * MemMin is what the bootstrap code in l.s has already mapped;
    6  * MemMax is the limit of physical memory to scan.
    7  */
    8 #include "u.h"
    9 #include "../port/lib.h"
   10 #include "mem.h"
   11 #include "dat.h"
   12 #include "fns.h"
   13 #include "io.h"
   14 #include "ureg.h"
   15 
#define MEMDEBUG        0       /* set non-zero to dump the memory maps at the end of meminit */

enum {
        MemUPA          = 0,            /* unbacked physical address */
        MemRAM          = 1,            /* physical memory */
        MemUMB          = 2,            /* upper memory block (<16MB) */
        MemReserved     = 3,            /* address range that must not be allocated */
        NMemType        = 4,

        KB              = 1024,

        MemMin          = 8*MB,         /* already mapped by the bootstrap code in l.s */
        MemMax          = (3*1024+768)*MB,      /* highest physical address the scan will probe */
};
   30 
/* one free range in a resource map */
typedef struct Map Map;
struct Map {
        ulong   size;           /* length in bytes; 0 marks the end of the array */
        ulong   addr;           /* physical start address of the range */
};

/* resource map: address-sorted array of free ranges plus a lock */
typedef struct RMap RMap;
struct RMap {
        char*   name;           /* used in diagnostics */
        Map*    map;            /* first entry */
        Map*    mapend;         /* last usable slot; final entry stays zero as a sentinel */

        Lock;                   /* unnamed member (Plan 9 C); lock(rmap) guards the array */
};
   45 
/*
 * Memory allocation tracking.  Each RMap holds the currently
 * available ranges of one class of physical address space.
 */
static Map mapupa[16];
static RMap rmapupa = {
        "unallocated unbacked physical memory",
        mapupa,
        &mapupa[nelem(mapupa)-1],
};

/* NOTE(review): xrmapupa is not referenced elsewhere in this file */
static Map xmapupa[16];
static RMap xrmapupa = {
        "unbacked physical memory",
        xmapupa,
        &xmapupa[nelem(xmapupa)-1],
};

/* free RAM pages found by lowraminit/ramscan/e820scan */
static Map mapram[16];
static RMap rmapram = {
        "physical memory",
        mapram,
        &mapram[nelem(mapram)-1],
};

/* uncached upper-memory space (unused ROM/device windows below 16MB) */
static Map mapumb[64];
static RMap rmapumb = {
        "upper memory block",
        mapumb,
        &mapumb[nelem(mapumb)-1],
};

/* read-write device memory found in the UMB area by umbscan */
static Map mapumbrw[16];
static RMap rmapumbrw = {
        "UMB device memory",
        mapumbrw,
        &mapumbrw[nelem(mapumbrw)-1],
};
   83 
   84 void
   85 mapprint(RMap *rmap)
   86 {
   87         Map *mp;
   88 
   89         print("%s\n", rmap->name);      
   90         for(mp = rmap->map; mp->size; mp++)
   91                 print("\t%8.8luX %8.8luX (%lud)\n", mp->addr, mp->addr+mp->size, mp->size);
   92 }
   93 
   94 
   95 void
   96 memdebug(void)
   97 {
   98         ulong maxpa, maxpa1, maxpa2;
   99 
  100         maxpa = (nvramread(0x18)<<8)|nvramread(0x17);
  101         maxpa1 = (nvramread(0x31)<<8)|nvramread(0x30);
  102         maxpa2 = (nvramread(0x16)<<8)|nvramread(0x15);
  103         print("maxpa = %luX -> %luX, maxpa1 = %luX maxpa2 = %luX\n",
  104                 maxpa, MB+maxpa*KB, maxpa1, maxpa2);
  105 
  106         mapprint(&rmapram);
  107         mapprint(&rmapumb);
  108         mapprint(&rmapumbrw);
  109         mapprint(&rmapupa);
  110 }
  111 
/*
 * Return the range [addr, addr+size) to the resource map rmap.
 * The map is kept sorted by address and adjacent ranges are
 * coalesced; if the fixed-size entry array overflows the range is
 * dropped with a console complaint.  Called with rmap unlocked.
 */
void
mapfree(RMap* rmap, ulong addr, ulong size)
{
        Map *mp;
        ulong t;

        if(size <= 0)   /* size is unsigned, so this only filters size == 0 */
                return;

        lock(rmap);
        /* find the first entry whose address is above addr */
        for(mp = rmap->map; mp->addr <= addr && mp->size; mp++)
                ;

        if(mp > rmap->map && (mp-1)->addr+(mp-1)->size == addr){
                /* abuts the previous entry: extend it upwards... */
                (mp-1)->size += size;
                if(addr+size == mp->addr){
                        /* ...and now also abuts the next: merge, shift the rest down */
                        (mp-1)->size += mp->size;
                        while(mp->size){
                                mp++;
                                (mp-1)->addr = mp->addr;
                                (mp-1)->size = mp->size;
                        }
                }
        }
        else{
                if(addr+size == mp->addr && mp->size){
                        /* abuts the following entry: grow it downwards */
                        mp->addr -= size;
                        mp->size += size;
                }
                else do{
                        /* insert here, rippling the remaining entries up one slot */
                        if(mp >= rmap->mapend){
                                print("mapfree: %s: losing 0x%luX, %ld\n",
                                        rmap->name, addr, size);
                                break;
                        }
                        t = mp->addr;
                        mp->addr = addr;
                        addr = t;
                        t = mp->size;
                        mp->size = size;
                        mp++;
                }while(size = t);       /* assignment intentional: stop after copying the zero sentinel */
        }
        unlock(rmap);
}
  157 
/*
 * Allocate size bytes from rmap, aligned to align if align > 0.
 * If addr is non-zero the request is for that specific range and
 * [addr, addr+size) must lie entirely within one free entry.
 * Returns the allocated physical address, or 0 on failure (note
 * that 0 can also be a valid address; callers must know which).
 */
ulong
mapalloc(RMap* rmap, ulong addr, int size, int align)
{
        Map *mp;
        ulong maddr, oaddr;

        lock(rmap);
        for(mp = rmap->map; mp->size; mp++){
                maddr = mp->addr;

                if(addr){
                        /*
                         * A specific address range has been given:
                         *   if the current map entry is greater then
                         *   the address is not in the map;
                         *   if the current map entry does not overlap
                         *   the beginning of the requested range then
                         *   continue on to the next map entry;
                         *   if the current map entry does not entirely
                         *   contain the requested range then the range
                         *   is not in the map.
                         */
                        if(maddr > addr)
                                break;
                        if(mp->size < addr - maddr)     /* maddr+mp->size < addr, but no overflow */
                                continue;
                        if(addr - maddr > mp->size - size)      /* addr+size > maddr+mp->size, but no overflow */
                                break;
                        maddr = addr;
                }

                /* round the candidate address up to the requested alignment */
                if(align > 0)
                        maddr = ((maddr+align-1)/align)*align;
                if(mp->addr+mp->size-maddr < size)
                        continue;

                oaddr = mp->addr;
                mp->addr = maddr+size;
                mp->size -= maddr-oaddr+size;
                if(mp->size == 0){
                        /* entry exhausted: shift the rest of the map down over it */
                        do{
                                mp++;
                                (mp-1)->addr = mp->addr;
                        }while((mp-1)->size = mp->size);        /* assignment intentional */
                }

                unlock(rmap);
                /* return any alignment gap left at the front of the entry */
                if(oaddr != maddr)
                        mapfree(rmap, oaddr, maddr-oaddr);

                return maddr;
        }
        unlock(rmap);

        return 0;
}
  214 
  215 /*
  216  * Allocate from the ram map directly to make page tables.
  217  * Called by mmuwalk during e820scan.
  218  */
  219 void*
  220 rampage(void)
  221 {
  222         ulong m;
  223         
  224         m = mapalloc(&rmapram, 0, BY2PG, BY2PG);
  225         if(m == 0)
  226                 return nil;
  227         return KADDR(m);
  228 }
  229 
/*
 * Remove the ranges named by the "umbexclude" configuration
 * variable from the UMB map so they are never handed out.  The
 * syntax is a comma-separated list of inclusive start-end ranges.
 */
static void
umbexclude(void)
{
        int size;
        ulong addr;
        char *op, *p, *rptr;

        if((p = getconf("umbexclude")) == nil)
                return;

        while(p && *p != '\0' && *p != '\n'){
                op = p;         /* remember the start of this range for error messages */
                addr = strtoul(p, &rptr, 0);
                if(rptr == nil || rptr == p || *rptr != '-'){
                        print("umbexclude: invalid argument <%s>\n", op);
                        break;
                }
                p = rptr+1;

                /* the end address is inclusive, hence the +1 */
                size = strtoul(p, &rptr, 0) - addr + 1;
                if(size <= 0){
                        print("umbexclude: bad range <%s>\n", op);
                        break;
                }
                if(rptr != nil && *rptr == ',')
                        *rptr++ = '\0';
                p = rptr;

                /* claim the range so nobody else can; result deliberately ignored */
                mapalloc(&rmapumb, addr, size, 0);
        }
}
  261 
/*
 * Probe the upper memory area for unused space and read-write
 * device memory, populating rmapumb and rmapumbrw.  Pokes the
 * hardware directly; must run before memory is handed out.
 */
static void
umbscan(void)
{
        uchar *p;

        /*
         * Scan the Upper Memory Blocks (0xA0000->0xF0000) for pieces
         * which aren't used; they can be used later for devices which
         * want to allocate some virtual address space.
         * Check for two things:
         * 1) device BIOS ROM. This should start with a two-byte header
         *    of 0x55 0xAA, followed by a byte giving the size of the ROM
         *    in 512-byte chunks. These ROM's must start on a 2KB boundary.
         * 2) device memory. This is read-write.
         * There are some assumptions: there's VGA memory at 0xA0000 and
         * the VGA BIOS ROM is at 0xC0000. Also, if there's no ROM signature
         * at 0xE0000 then the whole 64KB up to 0xF0000 is theoretically up
         * for grabs; check anyway.
         * NOTE(review): the loop below actually starts at 0xD0000, not
         * 0xA0000 - the VGA frame buffer and VGA BIOS region is skipped.
         */
        p = KADDR(0xD0000);
        while(p < (uchar*)KADDR(0xE0000)){
                /*
                 * Test for 0x55 0xAA before poking obtrusively,
                 * some machines (e.g. Thinkpad X20) seem to map
                 * something dynamic here (cardbus?) causing weird
                 * problems if it is changed.
                 */
                if(p[0] == 0x55 && p[1] == 0xAA){
                        /* ROM found: skip over it (p[2] is its size in 512-byte units) */
                        p += p[2]*512;
                        continue;
                }

                /* poke both ends of the 2KB chunk to see whether it is writable */
                p[0] = 0xCC;
                p[2*KB-1] = 0xCC;
                if(p[0] != 0xCC || p[2*KB-1] != 0xCC){
                        /* not writable: write a fake ROM header to detect dynamic mappings */
                        p[0] = 0x55;
                        p[1] = 0xAA;
                        p[2] = 4;
                        if(p[0] == 0x55 && p[1] == 0xAA){
                                p += p[2]*512;
                                continue;
                        }
                        /* reads back all-ones: nothing mapped, usable as UMB space */
                        if(p[0] == 0xFF && p[1] == 0xFF)
                                mapfree(&rmapumb, PADDR(p), 2*KB);
                }
                else
                        /* writable: read-write device memory */
                        mapfree(&rmapumbrw, PADDR(p), 2*KB);
                p += 2*KB;
        }

        /* the 64KB at 0xE0000 is free if it has no ROM signature and isn't writable */
        p = KADDR(0xE0000);
        if(p[0] != 0x55 || p[1] != 0xAA){
                p[0] = 0xCC;
                p[64*KB-1] = 0xCC;
                if(p[0] != 0xCC && p[64*KB-1] != 0xCC)
                        mapfree(&rmapumb, PADDR(p), 64*KB);
        }

        umbexclude();
}
  322 
/*
 * Register the free conventional memory (below 640KB) and the gap
 * between the end of the kernel image and MemMin with the RAM map.
 * Panics if the kernel image itself extends past MemMin.
 */
static void
lowraminit(void)
{
        ulong n, pa, x;
        uchar *bda;

        /*
         * Initialise the memory bank information for conventional memory
         * (i.e. less than 640KB). The base is the first location after the
         * bootstrap processor MMU information and the limit is obtained from
         * the BIOS data area.
         */
        x = PADDR(CPU0END);
        bda = (uchar*)KADDR(0x400);
        /* BDA bytes 0x13/0x14 hold the base memory size in KB */
        n = ((bda[0x14]<<8)|bda[0x13])*KB-x;
        mapfree(&rmapram, x, n);
        memset(KADDR(x), 0, n);                 /* keep us honest */

        /* free the space between the end of the kernel image and MemMin */
        x = PADDR(PGROUND((ulong)end));
        pa = MemMin;
        if(x > pa)
                panic("kernel too big");
        mapfree(&rmapram, x, pa-x);
        memset(KADDR(x), 0, pa-x);              /* keep us honest */
}
  348 
/*
 * Size memory by probing it a megabyte at a time from MemMin up to
 * maxmem (or a CMOS-derived limit when maxmem is 0), building the
 * kernel page tables on the fly.  RAM goes to rmapram, non-RAM
 * below 16MB to rmapumb, and everything else to rmapupa.  Used
 * only when the BIOS E820 map is unavailable.
 */
static void
ramscan(ulong maxmem)
{
        ulong *k0, kzero, map, maxkpa, maxpa, pa, *pte, *table, *va, vbase, x;
        int nvalid[NMemType];

        /*
         * The bootstrap code has created a prototype page
         * table which maps the first MemMin of physical memory to KZERO.
         * The page directory is at m->pdb and the first page of
         * free memory is after the per-processor MMU information.
         */
        pa = MemMin;

        /*
         * Check if the extended memory size can be obtained from the CMOS.
         * If it's 0 then it's either not known or >= 64MB. Always check
         * at least 24MB in case there's a memory gap (up to 8MB) below 16MB;
         * in this case the memory from the gap is remapped to the top of
         * memory.
         * The value in CMOS is supposed to be the number of KB above 1MB.
         */
        if(maxmem == 0){
                x = (nvramread(0x18)<<8)|nvramread(0x17);
                if(x == 0 || x >= (63*KB))
                        maxpa = MemMax;
                else
                        maxpa = MB+x*KB;
                if(maxpa < 24*MB)
                        maxpa = 24*MB;
        }else
                maxpa = maxmem;
        maxkpa = (u32int)-KZERO;        /* 2^32 - KZERO */

        /*
         * March up memory from MemMin to maxpa 1MB at a time,
         * mapping the first page and checking the page can
         * be written and read correctly. The page tables are created here
         * on the fly, allocating from low memory as necessary.
         */
        k0 = (ulong*)KADDR(0);
        kzero = *k0;            /* saved so the mirror test below can be undone */
        map = 0;                /* physical address of the current scratch page table, 0 if none */
        x = 0x12345678;         /* test pattern, varied every iteration */
        memset(nvalid, 0, sizeof(nvalid));
        
        /*
         * Can't map memory to KADDR(pa) when we're walking because
         * can only use KADDR for relatively low addresses.
         * Instead, map each 4MB we scan to the virtual address range
         * MemMin->MemMin+4MB while we are scanning.
         */
        vbase = MemMin;
        while(pa < maxpa){
                /*
                 * Map the page. Use mapalloc(&rmapram, ...) to make
                 * the page table if necessary, it will be returned to the
                 * pool later if it isn't needed.  Map in a fixed range (the second 4M)
                 * because high physical addresses cannot be passed to KADDR.
                 */
                va = (void*)(vbase + pa%(4*MB));
                table = &m->pdb[PDX(va)];
                if(pa%(4*MB) == 0){
                        /* starting a new 4MB chunk: get a fresh page table if needed */
                        if(map == 0 && (map = mapalloc(&rmapram, 0, BY2PG, BY2PG)) == 0)
                                break;
                        memset(KADDR(map), 0, BY2PG);
                        *table = map|PTEWRITE|PTEVALID;
                        memset(nvalid, 0, sizeof(nvalid));
                }
                table = KADDR(PPN(*table));
                pte = &table[PTX(va)];

                *pte = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
                mmuflushtlb(PADDR(m->pdb));
                /*
                 * Write a pattern to the page and write a different
                 * pattern to a possible mirror at KZERO. If the data
                 * reads back correctly the chunk is some type of RAM (possibly
                 * a linearly-mapped VGA framebuffer, for instance...) and
                 * can be cleared and added to the memory pool. If not, the
                 * chunk is marked uncached and added to the UMB pool if <16MB
                 * or is marked invalid and added to the UPA pool.
                 */
                *va = x;
                *k0 = ~x;
                if(*va == x){
                        nvalid[MemRAM] += MB/BY2PG;
                        mapfree(&rmapram, pa, MB);

                        /* fill in PTEs for the rest of the megabyte */
                        do{
                                *pte++ = pa|PTEWRITE|PTEVALID;
                                pa += BY2PG;
                        }while(pa % MB);
                        mmuflushtlb(PADDR(m->pdb));
                        /* memset(va, 0, MB); so damn slow to memset all of memory */
                }
                else if(pa < 16*MB){
                        nvalid[MemUMB] += MB/BY2PG;
                        mapfree(&rmapumb, pa, MB);

                        do{
                                *pte++ = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
                                pa += BY2PG;
                        }while(pa % MB);
                }
                else{
                        nvalid[MemUPA] += MB/BY2PG;
                        mapfree(&rmapupa, pa, MB);

                        *pte = 0;
                        pa += MB;
                }
                /*
                 * Done with this 4MB chunk, review the options:
                 * 1) not physical memory and >=16MB - invalidate the PDB entry;
                 * 2) physical memory - use the 4MB page extension if possible;
                 * 3) not physical memory and <16MB - use the 4MB page extension
                 *    if possible;
                 * 4) mixed or no 4MB page extension - commit the already
                 *    initialised space for the page table.
                 */
                if(pa%(4*MB) == 0 && pa >= 32*MB && nvalid[MemUPA] == (4*MB)/BY2PG){
                        /*
                         * If we encounter a 4MB chunk of missing memory
                         * at a sufficiently high offset, call it the end of
                         * memory.  Otherwise we run the risk of thinking
                         * that video memory is real RAM.
                         */
                        break;
                }
                if(pa <= maxkpa && pa%(4*MB) == 0){
                        table = &m->pdb[PDX(KADDR(pa - 4*MB))];
                        if(nvalid[MemUPA] == (4*MB)/BY2PG)
                                *table = 0;
                        else if(nvalid[MemRAM] == (4*MB)/BY2PG && (m->cpuiddx & 0x08))  /* cpuid dx bit 3: 4MB pages (PSE) */
                                *table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEVALID;
                        else if(nvalid[MemUMB] == (4*MB)/BY2PG && (m->cpuiddx & 0x08))
                                *table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEUNCACHED|PTEVALID;
                        else{
                                /* mixed chunk: commit the scratch page table as-is */
                                *table = map|PTEWRITE|PTEVALID;
                                map = 0;
                        }
                }
                mmuflushtlb(PADDR(m->pdb));
                x += 0x3141526;         /* vary the test pattern */
        }
        /*
         * If we didn't reach the end of the 4MB chunk, that part won't
         * be mapped.  Commit the already initialised space for the page table.
         */
        if(pa % (4*MB) && pa <= maxkpa){
                m->pdb[PDX(KADDR(pa))] = map|PTEWRITE|PTEVALID;
                map = 0;
        }
        if(map)
                mapfree(&rmapram, map, BY2PG);

        /* tear down the scratch window and hand everything above pa to the UPA pool */
        m->pdb[PDX(vbase)] = 0;
        mmuflushtlb(PADDR(m->pdb));

        mapfree(&rmapupa, pa, (u32int)-pa);
        *k0 = kzero;            /* restore the word clobbered by the mirror test */
}
  512 
/*
 * BIOS Int 0x15 E820 memory map.
 */
enum
{
        SMAP = ('S'<<24)|('M'<<16)|('A'<<8)|'P',        /* "SMAP" signature passed in dx, returned in ax */
        Ememory = 1,            /* E820 range type: usable RAM */
        Ereserved = 2,          /* E820 range type: reserved */
        Carry = 1,              /* carry bit in the real-mode flags register */
};
  523 
/* one E820 address range descriptor, 20 bytes as returned by the BIOS */
typedef struct Emap Emap;
struct Emap
{
        uvlong base;    /* physical start address */
        uvlong len;     /* length in bytes */
        ulong type;     /* Ememory, Ereserved, ... */
};
static Emap emap[16];   /* descriptors collected by e820scan */
int nemap;              /* number of valid entries in emap */
  533 
/* printable names for the low E820 range types, indexed by Emap.type */
static char *etypes[] =
{
        "type=0",
        "memory",
        "reserved",
        "acpi reclaim",
        "acpi nvs",
};
  542 
  543 static int
  544 emapcmp(const void *va, const void *vb)
  545 {
  546         Emap *a, *b;
  547         
  548         a = (Emap*)va;
  549         b = (Emap*)vb;
  550         if(a->base < b->base)
  551                 return -1;
  552         if(a->base > b->base)
  553                 return 1;
  554         if(a->len < b->len)
  555                 return -1;
  556         if(a->len > b->len)
  557                 return 1;
  558         return a->type - b->type;
  559 }
  560 
/*
 * Add the physical range [base, base+len) with the given memory
 * type to the matching resource map and, for usable types, map it
 * into the kernel's virtual address space with appropriate PTE
 * flags.  Ranges overlapping MemMin, the 16MB UMB boundary or the
 * kernel image are split by recursive calls.
 */
static void
map(ulong base, ulong len, int type)
{
        ulong e, n;
        ulong *table, flags, maxkpa;
        
        /*
         * Split any call crossing MemMin to make below simpler.
         */
        if(base < MemMin && len > MemMin-base){
                n = MemMin - base;
                map(base, n, type);
                map(MemMin, len-n, type);
                /* falls through to the base < MemMin return below */
        }
        
        /*
         * Let lowraminit and umbscan hash out the low MemMin.
         */
        if(base < MemMin)
                return;

        /*
         * Any non-memory below 16*MB is used as upper mem blocks.
         */
        if(type == MemUPA && base < 16*MB && base+len > 16*MB){
                map(base, 16*MB-base, MemUMB);
                map(16*MB, len-(16*MB-base), MemUPA);
                return;
        }
        
        /*
         * Memory below CPU0END is reserved for the kernel
         * and already mapped.
         */
        if(base < PADDR(CPU0END)){
                n = PADDR(CPU0END) - base;
                if(len <= n)
                        return;
                map(PADDR(CPU0END), len-n, type);
                return;
        }
        
        /*
         * Memory between KTZERO and end is the kernel itself
         * and is already mapped.
         */
        if(base < PADDR(KTZERO) && base+len > PADDR(KTZERO)){
                map(base, PADDR(KTZERO)-base, type);
                return;
        }
        if(PADDR(KTZERO) < base && base < PADDR(PGROUND((ulong)end))){
                /*
                 * NOTE(review): here n is the absolute address of the end of
                 * the kernel image, not the overlap with [base, base+len) as
                 * in the CPU0END case above, yet len is compared against it
                 * and reduced by it.  Looks inconsistent - confirm against
                 * upstream before changing.
                 */
                n = PADDR(PGROUND((ulong)end));
                if(len <= n)
                        return;
                map(PADDR(PGROUND((ulong)end)), len-n, type);
                return;
        }
        
        /*
         * Now we have a simple case.
         */
        // print("map %.8lux %.8lux %d\n", base, base+len, type);
        switch(type){
        case MemRAM:
                mapfree(&rmapram, base, len);
                flags = PTEWRITE|PTEVALID;
                break;
        case MemUMB:
                mapfree(&rmapumb, base, len);
                flags = PTEWRITE|PTEUNCACHED|PTEVALID;
                break;
        case MemUPA:
                mapfree(&rmapupa, base, len);
                flags = 0;
                break;
        default:
        case MemReserved:
                flags = 0;
                break;
        }
        
        /*
         * bottom MemMin is already mapped - just twiddle flags.
         * (not currently used - see above)
         */
        if(base < MemMin){
                table = KADDR(PPN(m->pdb[PDX(base)]));
                e = base+len;
                base = PPN(base);
                for(; base<e; base+=BY2PG)
                        table[PTX(base)] |= flags;
                return;
        }
        
        /*
         * Only map from KZERO to 2^32.
         */
        if(flags){
                maxkpa = -KZERO;
                if(base >= maxkpa)
                        return;
                if(len > maxkpa-base)
                        len = maxkpa - base;
                pdbmap(m->pdb, base|flags, base+KZERO, len);
        }
}
  667 
/*
 * Fetch the BIOS E820 memory map via real-mode INT 0x15 calls and
 * feed each range (clamped to the low 32 bits) to map().  Returns
 * 0 on success, -1 when disabled by configuration or unsupported
 * by the BIOS; the caller then falls back to ramscan.
 */
static int
e820scan(void)
{
        int i;
        Ureg u;
        ulong cont, base, len;
        uvlong last;
        Emap *e;

        if(getconf("*norealmode") || getconf("*noe820scan"))
                return -1;

        cont = 0;
        for(i=0; i<nelem(emap); i++){
                /* one 20-byte descriptor per call; cont (bx) is the BIOS continuation value */
                memset(&u, 0, sizeof u);
                u.ax = 0xE820;
                u.bx = cont;
                u.cx = 20;
                u.dx = SMAP;
                u.es = (PADDR(RMBUF)>>4)&0xF000;
                u.di = PADDR(RMBUF)&0xFFFF;
                u.trap = 0x15;
                realmode(&u);
                cont = u.bx;
                if((u.flags&Carry) || u.ax != SMAP || u.cx != 20)
                        break;
                e = &emap[nemap++];
                *e = *(Emap*)RMBUF;
                if(u.bx == 0)   /* bx == 0: this was the last descriptor */
                        break;
        }
        if(nemap == 0)
                return -1;
        
        qsort(emap, nemap, sizeof emap[0], emapcmp);

        if(getconf("*noe820print") == nil){
                for(i=0; i<nemap; i++){
                        e = &emap[i];
                        print("E820: %.8llux %.8llux ", e->base, e->base+e->len);
                        if(e->type < nelem(etypes))
                                print("%s\n", etypes[e->type]);
                        else
                                print("type=%lud\n", e->type);
                }
        }

        last = 0;
        for(i=0; i<nemap; i++){
                e = &emap[i];
                /*
                 * pull out the info but only about the low 32 bits...
                 */
                if(e->base >= (1LL<<32))
                        break;
                base = e->base;
                if(base+e->len > (1LL<<32))
                        len = -base;    /* clamp: the bytes from base up to 2^32 */
                else
                        len = e->len;
                /*
                 * If the map skips addresses, mark them available.
                 */
                if(last < e->base)
                        map(last, e->base-last, MemUPA);
                last = base+len;
                if(e->type == Ememory)
                        map(base, len, MemRAM);
                else
                        map(base, len, MemReserved);
        }
        /* anything above the last descriptor up to 2^32 is unbacked */
        if(last < (1LL<<32))
                map(last, (u32int)-last, MemUPA);
        return 0;
}
  743 
/*
 * Called from main() on the bootstrap processor: set caching
 * attributes for the VGA/BIOS region, size memory (preferring the
 * BIOS E820 map, falling back to probing by hand) and fill in the
 * conf.mem banks of allocatable RAM.
 */
void
meminit(void)
{
        int i;
        Map *mp;
        Confmem *cm;
        ulong pa, *pte;
        ulong maxmem, lost;
        char *p;

        /* the "*maxmem" config variable caps the physical memory to scan */
        if(p = getconf("*maxmem"))
                maxmem = strtoul(p, 0, 0);
        else
                maxmem = 0;

        /*
         * Set special attributes for memory between 640KB and 1MB:
         *   VGA memory is writethrough;
         *   BIOS ROM's/UMB's are uncached;
         * then scan for useful memory.
         */
        for(pa = 0xA0000; pa < 0xC0000; pa += BY2PG){
                pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
                *pte |= PTEWT;
        }
        for(pa = 0xC0000; pa < 0x100000; pa += BY2PG){
                pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
                *pte |= PTEUNCACHED;
        }
        mmuflushtlb(PADDR(m->pdb));

        umbscan();
        lowraminit();
        if(e820scan() < 0)
                ramscan(maxmem);

        /*
         * Set the conf entries describing banks of allocatable memory.
         */
        for(i=0; i<nelem(mapram) && i<nelem(conf.mem); i++){
                mp = &rmapram.map[i];
                cm = &conf.mem[i];
                cm->base = mp->addr;
                cm->npage = mp->size/BY2PG;
        }
        
        /* RAM ranges beyond the number of conf.mem banks cannot be used */
        lost = 0;
        for(; i<nelem(mapram); i++)
                lost += rmapram.map[i].size;
        if(lost)
                print("meminit - lost %lud bytes\n", lost);

        if(MEMDEBUG)
                memdebug();
}
  799 
  800 /*
  801  * Allocate memory from the upper memory blocks.
  802  */
  803 ulong
  804 umbmalloc(ulong addr, int size, int align)
  805 {
  806         ulong a;
  807 
  808         if(a = mapalloc(&rmapumb, addr, size, align))
  809                 return (ulong)KADDR(a);
  810 
  811         return 0;
  812 }
  813 
/*
 * Return a range obtained from umbmalloc; addr is the kernel
 * virtual address it returned.
 */
void
umbfree(ulong addr, int size)
{
        mapfree(&rmapumb, PADDR(addr), size);
}
  819 
  820 ulong
  821 umbrwmalloc(ulong addr, int size, int align)
  822 {
  823         ulong a;
  824         uchar *p;
  825 
  826         if(a = mapalloc(&rmapumbrw, addr, size, align))
  827                 return(ulong)KADDR(a);
  828 
  829         /*
  830          * Perhaps the memory wasn't visible before
  831          * the interface is initialised, so try again.
  832          */
  833         if((a = umbmalloc(addr, size, align)) == 0)
  834                 return 0;
  835         p = (uchar*)a;
  836         p[0] = 0xCC;
  837         p[size-1] = 0xCC;
  838         if(p[0] == 0xCC && p[size-1] == 0xCC)
  839                 return a;
  840         umbfree(a, size);
  841 
  842         return 0;
  843 }
  844 
/*
 * Return a range obtained from umbrwmalloc's read-write map path;
 * addr is a kernel virtual address.
 */
void
umbrwfree(ulong addr, int size)
{
        mapfree(&rmapumbrw, PADDR(addr), size);
}
  850 
  851 /*
  852  * Give out otherwise-unused physical address space
  853  * for use in configuring devices.  Note that unlike upamalloc
  854  * before it, upaalloc does not map the physical address
  855  * into virtual memory.  Call vmap to do that.
  856  */
  857 ulong
  858 upaalloc(int size, int align)
  859 {
  860         ulong a;
  861 
  862         a = mapalloc(&rmapupa, 0, size, align);
  863         if(a == 0){
  864                 print("out of physical address space allocating %d\n", size);
  865                 mapprint(&rmapupa);
  866         }
  867         return a;
  868 }
  869 
/*
 * Return a physical range obtained from upaalloc.
 */
void
upafree(ulong pa, int size)
{
        mapfree(&rmapupa, pa, size);
}
  875 
/*
 * Reserve a specific physical range by allocating it from the UPA
 * map.  Failure is tolerated silently: with an E820 map the range
 * may already have been marked reserved.
 */
void
upareserve(ulong pa, int size)
{
        ulong a;
        
        a = mapalloc(&rmapupa, pa, size, 0);
        if(a != pa){
                /*
                 * This can happen when we're using the E820
                 * map, which might have already reserved some
                 * of the regions claimed by the pci devices.
                 */
        //      print("upareserve: cannot reserve pa=%#.8lux size=%d\n", pa, size);
                if(a != 0)
                        mapfree(&rmapupa, a, size);     /* got some other range: give it back */
        }
}
  893 
/*
 * Print a summary of the memory maps; wrapper for memdebug so it
 * can be called regardless of the MEMDEBUG setting.
 */
void
memorysummary(void)
{
        memdebug();
}
  899 

Cache object: 6d9872446fa18f2546833b67cb4792d2


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.