FreeBSD/Linux Kernel Cross Reference
sys/bitsy/mmu.c


#include        "u.h"
#include        "../port/lib.h"
#include        "mem.h"
#include        "dat.h"
#include        "fns.h"
#include        "io.h"
#include        "ureg.h"
#include        "../port/error.h"

/*
 *  to avoid mmu and cache flushing, we use the pid register in the MMU
 *  to map all user addresses.  Although there are 64 possible pids, we
 *  can only use 31 because there are only 32 protection domains and we
 *  need one for the kernel.  Pid i is thus associated with domain i.
 *  Domain 0 is used for the kernel.
 */
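
/*
 * Illustrative note (editorial addition, not part of the original file):
 * each level 1 descriptor carries a 4-bit domain number in bits 8:5,
 * which is why L1DomShift below is 5.  A descriptor for domain i would
 * be built with (i<<L1DomShift); this file only ever uses L1Domain0,
 * i.e. domain 0, for its mappings.
 */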

/* real protection bits */
enum
{
        /* level 1 descriptor bits */
        L1TypeMask=     (3<<0),
        L1Invalid=      (0<<0),
        L1PageTable=    (1<<0),
        L1Section=      (2<<0),
        L1Cached=       (1<<3),
        L1Buffered=     (1<<2),
        L1DomShift=     5,
        L1Domain0=      (0<<L1DomShift),
        L1KernelRO=     (0x0<<10),
        L1KernelRW=     (0x1<<10),
        L1UserRO=       (0x2<<10),
        L1UserRW=       (0x3<<10),
        L1SectBaseMask= (0xFFF<<20),
        L1PTBaseMask=   (0x3FFFFF<<10),

        /* level 2 descriptor bits */
        L2TypeMask=     (3<<0),
        L2SmallPage=    (2<<0),
        L2LargePage=    (1<<0),
        L2Cached=       (1<<3),
        L2Buffered=     (1<<2),
        L2KernelRW=     (0x55<<4),
        L2UserRO=       (0xAA<<4),
        L2UserRW=       (0xFF<<4),
        L2PageBaseMask= (0xFFFFF<<12),

        /* domain values */
        Dnoaccess=      0,
        Dclient=        1,
        Dmanager=       3,
};
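
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * with the bits above, a cached, buffered, kernel read/write section
 * descriptor for physical address pa is composed as
 *
 *      L1Section | L1KernelRW | L1Domain0 | L1Cached | L1Buffered
 *              | (pa & L1SectBaseMask)
 *
 * which is the pattern mmuinit() below writes for each megabyte of DRAM.
 */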

ulong *l1table;
static int mmuinited;

/*
 *  We map all of memory, flash, and the zeros area with sections.
 *  Special use space is mapped on the fly with mapspecial.
 */
void
mmuinit(void)
{
        ulong a, o;
        ulong *t;

        /* get a prototype level 1 page */
        l1table = xspanalloc(16*1024, 16*1024, 0);
        memset(l1table, 0, 16*1024);

        /* map low mem (I really don't know why I have to do this -- presotto) */
        for(o = 0; o < 1*OneMeg; o += OneMeg)
                l1table[(0+o)>>20] = L1Section | L1KernelRW | L1Domain0
                        | L1Cached | L1Buffered
                        | ((0+o)&L1SectBaseMask);

        /* map DRAM */
        for(o = 0; o < DRAMTOP-DRAMZERO; o += OneMeg)
                l1table[(DRAMZERO+o)>>20] = L1Section | L1KernelRW | L1Domain0
                        | L1Cached | L1Buffered
                        | ((PHYSDRAM0+o)&L1SectBaseMask);

        /* uncached DRAM */
        for(o = 0; o < UCDRAMTOP-UCDRAMZERO; o += OneMeg)
                l1table[(UCDRAMZERO+o)>>20] = L1Section | L1KernelRW | L1Domain0
                        | ((PHYSDRAM0+o)&L1SectBaseMask);

        /* map zeros area */
        for(o = 0; o < NULLTOP-NULLZERO; o += OneMeg)
                l1table[(NULLZERO+o)>>20] = L1Section | L1KernelRW | L1Domain0
                        | L1Cached | L1Buffered
                        | ((PHYSNULL0+o)&L1SectBaseMask);

        /* map flash */
        for(o = 0; o < FLASHTOP-FLASHZERO; o += OneMeg)
                l1table[(FLASHZERO+o)>>20] = L1Section | L1KernelRW | L1Domain0
                        | ((PHYSFLASH0+o)&L1SectBaseMask);

        /* map peripheral control module regs */
        mapspecial(0x80000000, OneMeg);

        /* map system control module regs */
        mapspecial(0x90000000, OneMeg);

        /*
         *  double map start of ram to exception vectors
         */
        a = EVECTORS;
        t = xspanalloc(BY2PG, 1024, 0);
        memset(t, 0, BY2PG);
        l1table[a>>20] = L1PageTable | L1Domain0 | (((ulong)t) & L1PTBaseMask);
        t[(a&0xfffff)>>PGSHIFT] = L2SmallPage | L2KernelRW | (PHYSDRAM0 & L2PageBaseMask);

        mmurestart();

        mmuinited = 1;
}

void
mmurestart(void)
{
        /* set up the domain register so that domain 0 (the only one we use) obeys the pte access bits */

        putdac(Dclient);

        /* point to map */
        putttb((ulong)l1table);

        /* enable mmu */
        wbflush();
        mmuinvalidate();
        mmuenable();
        cacheflush();
}

/*
 *  map on request
 */
static void*
_map(ulong pa, int len, ulong zero, ulong top, ulong l1prop, ulong l2prop)
{
        ulong *t;
        ulong va, i, base, end, off, entry;
        int large;
        ulong* rv;

        rv = nil;
        large = len >= 128*1024;
        if(large){
                base = pa & ~(OneMeg-1);
                end = (pa+len-1) & ~(OneMeg-1);
        } else {
                base = pa & ~(BY2PG-1);
                end = (pa+len-1) & ~(BY2PG-1);
        }
        off = pa - base;

        for(va = zero; va < top && base <= end; va += OneMeg){
                switch(l1table[va>>20] & L1TypeMask){
                default:
                        /* found unused entry on level 1 table */
                        if(large){
                                if(rv == nil)
                                        rv = (ulong*)(va+off);
                                l1table[va>>20] = L1Section | l1prop | L1Domain0 |
                                                        (base & L1SectBaseMask);
                                base += OneMeg;
                                continue;
                        } else {

                                /* create an L2 page table and keep going */
                                t = xspanalloc(BY2PG, 1024, 0);
                                memset(t, 0, BY2PG);
                                l1table[va>>20] = L1PageTable | L1Domain0 |
                                                        (((ulong)t) & L1PTBaseMask);
                        }
                        break;
                case L1Section:
                        /* if it's already mapped in a one meg area, don't remap */
                        entry = l1table[va>>20];
                        i = entry & L1SectBaseMask;
                        if(pa >= i && (pa+len) <= i + OneMeg)
                        if((entry & ~L1SectBaseMask) == (L1Section | l1prop | L1Domain0))
                                return (void*)(va + (pa & (OneMeg-1)));

                        continue;
                case L1PageTable:
                        if(large)
                                continue;
                        break;
                }

                /* here if we're using page maps instead of sections */
                t = (ulong*)(l1table[va>>20] & L1PTBaseMask);
                for(i = 0; i < OneMeg && base <= end; i += BY2PG){
                        entry = t[i>>PGSHIFT];

                        /* found unused entry on level 2 table */
                        if((entry & L2TypeMask) != L2SmallPage){
                                if(rv == nil)
                                        rv = (ulong*)(va+i+off);
                                t[i>>PGSHIFT] = L2SmallPage | l2prop |
                                                (base & L2PageBaseMask);
                                base += BY2PG;
                                continue;
                        }
                }
        }

        /* didn't fit */
        if(base <= end)
                return nil;
        cacheflush();

        return rv;
}

/* map in i/o registers */
void*
mapspecial(ulong pa, int len)
{
        return _map(pa, len, REGZERO, REGTOP, L1KernelRW, L2KernelRW);
}

/* map add-on memory */
void*
mapmem(ulong pa, int len, int cached)
{
        ulong l1, l2;

        if(cached){
                l1 = L1KernelRW|L1Cached|L1Buffered;
                l2 = L2KernelRW|L2Cached|L2Buffered;
        } else {
                l1 = L1KernelRW;
                l2 = L2KernelRW;
        }
        return _map(pa, len, EMEMZERO, EMEMTOP, l1, l2);
}
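
/*
 * Illustrative usage (editorial addition, not part of the original file):
 * a driver maps a device register window and gets back a kernel virtual
 * address in the REGZERO..REGTOP range, e.g.
 *
 *      ulong *regs;
 *
 *      // 0x80050000 is a made-up example base address
 *      regs = mapspecial(0x80050000, 64);
 *      if(regs == nil)
 *              panic("mapspecial failed");
 *
 * Requests of 128K or more are mapped with one-megabyte sections;
 * smaller requests, like this one, get L2 small-page mappings.
 */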

/* map a virtual address to a physical one */
ulong
mmu_paddr(ulong va)
{
        ulong entry;
        ulong *t;

        entry = l1table[va>>20];
        switch(entry & L1TypeMask){
        case L1Section:
                return (entry & L1SectBaseMask) | (va & (OneMeg-1));
        case L1PageTable:
                t = (ulong*)(entry & L1PTBaseMask);
                va &= OneMeg-1;
                entry = t[va>>PGSHIFT];
                switch(entry & L2TypeMask){
                case L2SmallPage:
                        return (entry & L2PageBaseMask) | (va & (BY2PG-1));
                }
        }
        return 0;
}

/* map a physical address to a virtual one */
ulong
findva(ulong pa, ulong zero, ulong top)
{
        int i;
        ulong entry, va;
        ulong start, end;
        ulong *t;

        for(va = zero; va < top; va += OneMeg){
                /* search the L1 entry */
                entry = l1table[va>>20];
                switch(entry & L1TypeMask){
                default:
                        return 0;       /* no holes */
                case L1Section:
                        start = entry & L1SectBaseMask;
                        end = start + OneMeg;
                        if(pa >= start && pa < end)
                                return va | (pa & (OneMeg-1));
                        continue;
                case L1PageTable:
                        break;
                }

                /* search the L2 entry */
                t = (ulong*)(l1table[va>>20] & L1PTBaseMask);
                for(i = 0; i < OneMeg; i += BY2PG){
                        entry = t[i>>PGSHIFT];

                        /* found unused entry on level 2 table */
                        if((entry & L2TypeMask) != L2SmallPage)
                                break;

                        start = entry & L2PageBaseMask;
                        end = start + BY2PG;
                        if(pa >= start && pa < end)
                                return va | i | (pa & (BY2PG-1));       /* i is already a byte offset */
                }
        }
        return 0;
}

ulong
mmu_kaddr(ulong pa)
{
        ulong va;

        /* try the easy stuff first (the first case is true most of the time) */
        if(pa >= PHYSDRAM0 && pa <= PHYSDRAM0+(DRAMTOP-DRAMZERO))
                return DRAMZERO+(pa-PHYSDRAM0);
        if(pa >= PHYSFLASH0 && pa <= PHYSFLASH0+(FLASHTOP-FLASHZERO))
                return FLASHZERO+(pa-PHYSFLASH0);
        if(pa >= PHYSNULL0 && pa <= PHYSNULL0+(NULLTOP-NULLZERO))
                return NULLZERO+(pa-PHYSNULL0);

        if(!mmuinited)
                return 0;       /* this shouldn't happen */

        /* walk the map for the special regs and extended memory */
        va = findva(pa, EMEMZERO, EMEMTOP);
        if(va != 0)
                return va;
        return findva(pa, REGZERO, REGTOP);
}

/*
 * Return the number of bytes that can be accessed via KADDR(pa).
 * If pa is not a valid argument to KADDR, return 0.
 */
ulong
cankaddr(ulong pa)
{
        /*
         * Is this enough?
         * We'll find out if anyone still has one
         * of these...
         */
        if(pa >= PHYSDRAM0 && pa <= PHYSDRAM0+(DRAMTOP-DRAMZERO))
                return PHYSDRAM0+(DRAMTOP-DRAMZERO) - pa;
        return 0;
}
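
/*
 * Illustrative usage (editorial addition, not part of the original file):
 * cankaddr() pairs with mmu_kaddr() above; code that wants to touch n
 * bytes of DRAM starting at physical address pa could check
 *
 *      if(cankaddr(pa) >= n)
 *              p = (uchar*)mmu_kaddr(pa);
 *
 * before using the direct-mapped window.
 */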

/*
 *  table to map fault.c bits to physical bits
 */
static ulong mmubits[16] =
{
        [PTEVALID]                                      L2SmallPage|L2Cached|L2Buffered|L2UserRO,
        [PTEVALID|PTEWRITE]                             L2SmallPage|L2Cached|L2Buffered|L2UserRW,
        [PTEVALID|PTEUNCACHED]                          L2SmallPage|L2UserRO,
        [PTEVALID|PTEUNCACHED|PTEWRITE]                 L2SmallPage|L2UserRW,

        [PTEKERNEL|PTEVALID]                            L2SmallPage|L2Cached|L2Buffered|L2KernelRW,
        [PTEKERNEL|PTEVALID|PTEWRITE]                   L2SmallPage|L2Cached|L2Buffered|L2KernelRW,
        [PTEKERNEL|PTEVALID|PTEUNCACHED]                L2SmallPage|L2KernelRW,
        [PTEKERNEL|PTEVALID|PTEUNCACHED|PTEWRITE]       L2SmallPage|L2KernelRW,
};
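
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * putmmu() below uses this table to turn the soft pte bits from fault.c
 * into a hardware L2 descriptor.  For a user page with PTEVALID|PTEWRITE
 * set and page frame address ppn, the entry becomes
 *
 *      mmubits[PTEVALID|PTEWRITE] | ppn
 *      = L2SmallPage|L2Cached|L2Buffered|L2UserRW | ppn
 *
 * i.e. the soft bits select the protection and cache bits while the page
 * frame address passes through unchanged.
 */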

/*
 *  add an entry to the current map
 */
void
putmmu(ulong va, ulong pa, Page *pg)
{
        Page *l2pg;
        ulong *t, *l1p, *l2p;
        int s;

        s = splhi();

        /* clear out the current entry */
        mmuinvalidateaddr(va);

        l2pg = up->l1page[va>>20];
        if(l2pg == nil){
                l2pg = up->mmufree;
                if(l2pg != nil){
                        up->mmufree = l2pg->next;
                } else {
                        l2pg = auxpage();
                        if(l2pg == nil)
                                pexit("out of memory", 1);
                }
                l2pg->va = VA(kmap(l2pg));
                up->l1page[va>>20] = l2pg;
                memset((uchar*)(l2pg->va), 0, BY2PG);
        }

        /* always point L1 entry to L2 page, can't hurt */
        l1p = &l1table[va>>20];
        *l1p = L1PageTable | L1Domain0 | (l2pg->pa & L1PTBaseMask);
        up->l1table[va>>20] = *l1p;
        t = (ulong*)l2pg->va;

        /* set L2 entry */
        l2p = &t[(va & (OneMeg-1))>>PGSHIFT];
        *l2p = mmubits[pa & (PTEKERNEL|PTEVALID|PTEUNCACHED|PTEWRITE)]
                | (pa & ~(PTEKERNEL|PTEVALID|PTEUNCACHED|PTEWRITE));

        /*  write back dirty entries - we need this because the pio() in
         *  fault.c is writing via a different virt addr and won't clean
         *  its changes out of the dcache.  Page coloring doesn't work
         *  on this mmu because the virtual cache is set associative
         *  rather than direct mapped.
         */
        cachewb();
        if(pg->cachectl[0] == PG_TXTFLUSH){
                /* pio() sets PG_TXTFLUSH whenever a text page has been written */
                icacheinvalidate();
                pg->cachectl[0] = PG_NOFLUSH;
        }

        splx(s);
}

/*
 *  free up all page tables for this proc
 */
void
mmuptefree(Proc *p)
{
        Page *pg;
        int i;

        for(i = 0; i < Nmeg; i++){
                pg = p->l1page[i];
                if(pg == nil)
                        continue;
                p->l1page[i] = nil;
                pg->next = p->mmufree;
                p->mmufree = pg;
        }
        memset(p->l1table, 0, sizeof(p->l1table));
}

/*
 *  this is called with palloc locked so the pagechainhead is kosher
 */
void
mmurelease(Proc* p)
{
        Page *pg, *next;

        /* write back dirty cache entries before changing map */
        cacheflush();

        mmuptefree(p);

        for(pg = p->mmufree; pg; pg = next){
                next = pg->next;
                if(--pg->ref)
                        panic("mmurelease: pg->ref %d\n", pg->ref);
                pagechainhead(pg);
        }
        if(p->mmufree && palloc.r.p)
                wakeup(&palloc.r);
        p->mmufree = nil;

        memset(l1table, 0, sizeof(p->l1table));
        cachewbregion((ulong)l1table, sizeof(p->l1table));
}

void
mmuswitch(Proc *p)
{
        if(m->mmupid == p->pid && p->newtlb == 0)
                return;
        m->mmupid = p->pid;

        /* write back dirty cache entries and invalidate all cache entries */
        cacheflush();

        if(p->newtlb){
                mmuptefree(p);
                p->newtlb = 0;
        }

        /* move in new map */
        memmove(l1table, p->l1table, sizeof(p->l1table));

        /* make sure map is in memory */
        cachewbregion((ulong)l1table, sizeof(p->l1table));

        /* lose any possible stale tlb entries */
        mmuinvalidate();
}

void
flushmmu(void)
{
        int s;

        s = splhi();
        up->newtlb = 1;
        mmuswitch(up);
        splx(s);
}

void
peekmmu(ulong va)
{
        ulong e, d;

        e = l1table[va>>20];
        switch(e & L1TypeMask){
        default:
                iprint("l1: %#p[%#lux] = %#lux invalid\n", l1table, va>>20, e);
                break;
        case L1PageTable:
                iprint("l1: %#p[%#lux] = %#lux pt\n", l1table, va>>20, e);
                va &= OneMeg-1;
                va >>= PGSHIFT;
                e &= L1PTBaseMask;
                d = ((ulong*)e)[va];
                iprint("l2: %#lux[%#lux] = %#lux\n", e, va, d);
                break;
        case L1Section:
                iprint("l1: %#p[%#lux] = %#lux section\n", l1table, va>>20, e);
                break;
        }
}

void
checkmmu(ulong, ulong)
{
}

void
countpagerefs(ulong*, int)
{
}

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.