FreeBSD/Linux Kernel Cross Reference
sys/osfmk/ppc/mappings.c


    1 /*
    2  * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
    3  *
    4  * @APPLE_LICENSE_HEADER_START@
    5  * 
    6  * The contents of this file constitute Original Code as defined in and
    7  * are subject to the Apple Public Source License Version 1.1 (the
    8  * "License").  You may not use this file except in compliance with the
    9  * License.  Please obtain a copy of the License at
   10  * http://www.apple.com/publicsource and read it before using this file.
   11  * 
   12  * This Original Code and all software distributed under the License are
   13  * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   14  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   15  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   16  * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
   17  * License for the specific language governing rights and limitations
   18  * under the License.
   19  * 
   20  * @APPLE_LICENSE_HEADER_END@
   21  */
   22 /*
   23  *      This file is used to maintain the virtual to real mappings for a PowerPC machine.
   24  *      The code herein is primarily used to bridge between the pmap layer and the hardware layer.
    25  *      Currently, some of the functionality of this module is contained within pmap.c.  We may want to move
   26  *      all of this into it (or most anyway) for the sake of performance.  We shall see as we write it.
   27  *
   28  *      We also depend upon the structure of the phys_entry control block.  We do put some processor 
   29  *      specific stuff in there.
   30  *
   31  */
   32 
   33 #include <debug.h>
   34 #include <mach_kgdb.h>
   35 #include <mach_vm_debug.h>
   36 #include <db_machine_commands.h>
   37 
   38 #include <mach/mach_types.h>
   39 #include <mach/vm_attributes.h>
   40 #include <mach/vm_param.h>
   41 
   42 #include <kern/kern_types.h>
   43 #include <kern/thread.h>
   44 #include <kern/spl.h>
   45 #include <kern/misc_protos.h>
   46 
   47 #include <vm/vm_fault.h>
   48 #include <vm/vm_kern.h>
   49 #include <vm/vm_map.h>
   50 #include <vm/vm_page.h>
   51 #include <vm/pmap.h>
   52 
   53 #include <ppc/exception.h>
   54 #include <ppc/misc_protos.h>
   55 #include <ppc/proc_reg.h>
   56 #include <ppc/pmap.h>
   57 #include <ppc/mem.h>
   58 #include <ppc/new_screen.h>
   59 #include <ppc/Firmware.h>
   60 #include <ppc/mappings.h>
   61 #include <ddb/db_output.h>
   62 
   63 #include <console/video_console.h>              /* (TEST/DEBUG) */
   64 
   65 #define PERFTIMES 0
   66 
   67 vm_map_t        mapping_map = VM_MAP_NULL;
   68 
   69 unsigned int    incrVSID = 0;                                           /* VSID increment value */
   70 unsigned int    mappingdeb0 = 0;                                                
   71 unsigned int    mappingdeb1 = 0;
   72 int ppc_max_adrsp;                                                                      /* Maximum address spaces */                    
   73                                 
   74 addr64_t                *mapdebug;                                                      /* (BRINGUP) */
   75 extern unsigned int DebugWork;                                          /* (BRINGUP) */
   76                                                 
   77 void mapping_verify(void);
   78 void mapping_phys_unused(ppnum_t pa);
   79 
   80 /*
   81  *  ppc_prot translates Mach's representation of protections to that of the PPC hardware.
   82  *  For Virtual Machines (VMM), we also provide translation entries where the output is
   83  *  the same as the input, allowing direct specification of PPC protections. Mach's 
   84  *      representations are always in the range 0..7, so they always fall into the first
   85  *      8 table entries; direct translations are placed in the range 8..16, so they fall into
   86  *  the second half of the table.
   87  *
   88  *  ***NOTE*** I've commented out the Mach->PPC translations that would set page-level
   89  *             no-execute, pending updates to the VM layer that will properly enable its
   90  *             use.  Bob Abeles 08.02.04
   91  */
   92  
   93 //unsigned char ppc_prot[16] = { 4, 7, 6, 6, 3, 3, 2, 2,                /* Mach -> PPC translations */
   94 unsigned char ppc_prot[16] = { 0, 3, 2, 2, 3, 3, 2, 2,          /* Mach -> PPC translations */
   95                                0, 1, 2, 3, 4, 5, 6, 7 };        /* VMM direct  translations */
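
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * how a protection lookup against the table above might be written.  The real
 * getProtPPC() used later in this file is defined elsewhere in the ppc
 * headers; this helper is hypothetical and only shows the indexing split
 * between the Mach half (entries 0..7) and the VMM direct half (entries 8..15).
 */
#if 0
static inline unsigned int prot_to_ppc(unsigned int prot, int vmm_direct) {
	unsigned int idx;

	idx = (prot & 7) | (vmm_direct ? 8 : 0);	/* Pick the Mach or VMM-direct half of ppc_prot */
	return (unsigned int)ppc_prot[idx];		/* Low-order bits are the PPC page protection */
}
#endif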
   96 
   97 /*
   98  *                      About PPC VSID generation:
   99  *
  100  *                      This function is called to generate an address space ID. This space ID must be unique within
  101  *                      the system.  For the PowerPC, it is used to build the VSID.  We build a VSID in the following
  102  *                      way:  space ID << 4 | segment.  Since a VSID is 24 bits, and out of that, we reserve the last
   103  *                      4, so we can have 2^20 (1M) unique IDs.  Each pmap has a unique space ID, so we should be able
   104  *                      to have 1M pmaps at a time; in practice we couldn't, since we'd run out of memory way before then.  The
  105  *                      problem is that only a certain number of pmaps are kept in a free list and if that is full,
   106  *                      they are released.  This causes us to lose track of what space IDs are free to be reused.
  107  *                      We can do 4 things: 1) not worry about it, 2) keep all free pmaps, 3) rebuild all mappings
  108  *                      when the space ID wraps, or 4) scan the list of pmaps and find a free one.
  109  *
  110  *                      Yet another consideration is the hardware use of the VSID.  It is used as part of the hash
  111  *                      calculation for virtual address lookup.  An improperly chosen value could potentially cause
  112  *                      too many hashes to hit the same bucket, causing PTEG overflows.  The actual hash function
  113  *                      is (page index XOR vsid) mod number of ptegs. For a 32MB machine, using the suggested
   114  *                      hash table size, there are 2^13 (8192) PTEGs.  Remember, though, that the bottom 4 bits
   115  *                      are reserved for the segment number, which means that we really have 2^(13-4) = 512 space IDs
  116  *                      before we start hashing to the same buckets with the same vaddrs. Also, within a space ID,
  117  *                      every 8192 pages (32MB) within a segment will hash to the same bucket.  That's 8 collisions
  118  *                      per segment.  So, a scan of every page for 256MB would fill 32 PTEGs completely, but
  119  *                      with no overflow.  I don't think that this is a problem.
  120  *
   121  *                      There may be a problem with the space ID, though. A new space ID is generated (mainly)
  122  *                      whenever there is a fork.  There shouldn't really be any problem because (for a 32MB
  123  *                      machine) we can have 512 pmaps and still not have hash collisions for the same address.
  124  *                      The potential problem, though, is if we get long-term pmaps that have space IDs that are
  125  *                      the same modulo 512.  We can reduce this problem by having the segment number be bits
  126  *                      0-3 of the space ID rather than 20-23.  Doing this means that, in effect, corresponding
  127  *                      vaddrs in different segments hash to the same PTEG. While this is somewhat of a problem,
   128  *                      I don't think that it is as significant as the other, so I'll make the space ID
  129  *                      with segment first.
  130  *
  131  *                      The final, and biggest problem is the wrap, which will happen every 2^20 space IDs.
  132  *                      While this is a problem that should only happen in periods counted in weeks, it can and
  133  *                      will happen.  This is assuming a monotonically increasing space ID. If we were to search
   134  *                      for an inactive space ID, there could not be a wrap until there were 2^20 concurrent space IDs.
  135  *                      That's pretty unlikely to happen.  There couldn't be enough storage to support a million tasks.
  136  *
  137  *                      So, what we do is to keep all active pmaps in a chain (anchored from kernel_pmap and
  138  *                      locked by free_pmap_lock) that is sorted in VSID sequence order.
  139  *
  140  *                      Whenever we need a VSID, we walk the list looking for the next in the sequence from
   141  *                      the last that was freed.  Then we allocate that.
  142  *
  143  *                      NOTE: We must be called with interruptions off and free_pmap_lock held.
  144  *
  145  */
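
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * how the 24-bit VSID and the PTEG hash described above fit together.  The
 * field layout follows the "space ID << 4 | segment" convention stated in the
 * comment; the helper names are hypothetical, and the hash assumes a
 * power-of-two number of PTEGs so that "mod" can be done with a mask.
 */
#if 0
static inline unsigned int vsid_for(unsigned int space_id, unsigned int segment) {
	return ((space_id << 4) | (segment & 0xF)) & 0x00FFFFFF;	/* 20-bit space ID, 4-bit segment, 24 bits total */
}

static inline unsigned int pteg_index(unsigned int vsid, unsigned int page_index, unsigned int nptegs) {
	return (page_index ^ vsid) & (nptegs - 1);	/* (page index XOR vsid) mod number of PTEGs */
}
#endif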
  146 
  147 /*
  148  *              mapping_init();
  149  *                      Do anything that needs to be done before the mapping system can be used.
  150  *                      Hash table must be initialized before we call this.
  151  *
  152  *                      Calculate the SID increment.  Currently we use size^(1/2) + size^(1/4) + 1;
  153  */
  154 
  155 void mapping_init(void) {
  156 
  157         unsigned int tmp, maxeff, rwidth;
  158         
  159         ppc_max_adrsp = maxAdrSp;                                                                       /* Set maximum address spaces */                        
  160         
  161         maxeff = 32;                                                                                            /* Assume 32-bit */
  162         if(PerProcTable[0].ppe_vaddr->pf.Available & pf64Bit) maxeff = 64;      /* Is this a 64-bit machine? */
  163         
  164         rwidth = PerProcTable[0].ppe_vaddr->pf.pfMaxVAddr - maxAdrSpb;          /* Reduce address width by width of address space ID */
  165         if(rwidth > maxeff) rwidth = maxeff;                                            /* If we still have more virtual than effective, clamp at effective */
  166         
  167         vm_max_address = 0xFFFFFFFFFFFFFFFFULL >> (64 - rwidth);                /* Get maximum effective address supported */
  168         vm_max_physical = 0xFFFFFFFFFFFFFFFFULL >> (64 - PerProcTable[0].ppe_vaddr->pf.pfMaxPAddr);     /* Get maximum physical address supported */
  169         
  170         if(PerProcTable[0].ppe_vaddr->pf.Available & pf64Bit) {                         /* Are we 64 bit? */
  171                 tmp = 12;                                                                                               /* Size of hash space */
  172         }
  173         else {
  174                 __asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size)); /* Get number of leading 0s */
  175                 tmp = 32 - tmp;                                                                                 /* Size of hash space */
  176         }
  177 
  178         incrVSID = 1 << ((tmp + 1) >> 1);                                                       /* Get ceiling of sqrt of table size */
  179         incrVSID |= 1 << ((tmp + 1) >> 2);                                                      /* Get ceiling of quadroot of table size */
   180         incrVSID |= 1;                                                                                          /* Add in the final +1 of the formula */
  181 
  182         return;
  183 
  184 }
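
/*
 * Worked example (editor's addition): with a hash space of 2^12 entries, as in
 * the 64-bit case above where tmp = 12,
 *
 *	incrVSID = (1 << ((12 + 1) >> 1)) | (1 << ((12 + 1) >> 2)) | 1
 *	         = (1 << 6) | (1 << 3) | 1
 *	         = 64 + 8 + 1 = 73
 *
 * which matches the stated formula size^(1/2) + size^(1/4) + 1 = 64 + 8 + 1.
 */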
  185 
  186 
  187 /*
  188  *              mapping_remove(pmap_t pmap, addr64_t va);
  189  *                      Given a pmap and virtual address, this routine finds the mapping and unmaps it.
  190  *                      The mapping block will be added to
  191  *                      the free list.  If the free list threshold is reached, garbage collection will happen.
  192  *
  193  *                      We also pass back the next higher mapped address. This is done so that the higher level
  194  *                      pmap_remove function can release a range of addresses simply by calling mapping_remove
  195  *                      in a loop until it finishes the range or is returned a vaddr of 0.
  196  *
  197  *                      Note that if the mapping is not found, we return the next VA ORed with 1
  198  *
  199  */
  200 
   201 addr64_t mapping_remove(pmap_t pmap, addr64_t va) {             /* Remove a single mapping for this VADDR.
   202                                                                                                                    Returns the next mapped VA, ORed with 1 if nothing was found to remove */
  203 
  204         mapping_t       *mp;
  205         addr64_t        nextva;
  206         ppnum_t         pgaddr;
  207         
  208         va &= ~PAGE_MASK;                                                                       /* Scrub noise bits */
  209         
   210         do {                                                                                            /* Keep trying until we truly fail */
  211                 mp = hw_rem_map(pmap, va, &nextva);                             /* Remove a mapping from this pmap */
  212         } while (mapRtRemove == ((unsigned int)mp & mapRetCode));
  213         
  214         switch ((unsigned int)mp & mapRetCode) {
  215                 case mapRtOK:
  216                         break;                                                                          /* Mapping removed */
  217                 case mapRtNotFnd:
  218                         return (nextva | 1);                                            /* Nothing found to unmap */
  219                 default:
  220                         panic("mapping_remove: hw_rem_map failed - pmap = %08X, va = %016llX, code = %08X\n",
  221                                 pmap, va, mp);
  222                         break;
  223         }
  224 
  225         pgaddr = mp->mpPAddr;                                                           /* Get page number from mapping */
  226         
  227         mapping_free(mp);                                                                       /* Add mapping to the free list */
  228         
  229         if ((pmap->pmapFlags & pmapVMhost) && pmap->pmapVmmExt) {
  230                                                                                                                 /* If this is an assisted host, scrub any guest mappings */
  231                 unsigned int  idx;
  232                 phys_entry_t *physent = mapping_phys_lookup(pgaddr, &idx);
  233                                                                                                                 /* Get physent for our physical page */
  234                 if (!physent) {                                                                 /* No physent, could be in I/O area, so exit */
  235                         return (nextva);
  236                 }
  237                 
  238                 do {                                                                                    /* Iterate 'till all guest mappings are gone */
  239                         mp = hw_scrub_guest(physent, pmap);                     /* Attempt to scrub a guest mapping */
  240                         switch ((unsigned int)mp & mapRetCode) {
  241                                 case mapRtGuest:                                                /* Found a guest mapping */
  242                                 case mapRtNotFnd:                                               /* Mapping was there, but disappeared, must retry */
  243                                 case mapRtEmpty:                                                /* No guest mappings left to scrub */
  244                                         break;
  245                                 default:
  246                                         panic("mapping_remove: hw_scrub_guest failed - physent = %08X, code = %08X\n",
  247                                                 physent, mp);                                   /* Cry havoc, cry wrack,
  248                                                                                                                         at least we die with harness on our backs */
  249                                         break;
  250                         }
  251                 } while (mapRtEmpty != ((unsigned int)mp & mapRetCode));
  252         }
  253 
  254         return nextva;                                                                          /* Tell them we did it */
  255 }
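
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * how a caller can use the nextva convention described above to unmap a range.
 * The low-order bit only flags "nothing was mapped at va"; with the bit
 * cleared, the value is the next address worth trying.  The helper name and
 * loop structure are hypothetical.
 */
#if 0
static void unmap_range_example(pmap_t pmap, addr64_t start, addr64_t end) {
	addr64_t va = start;

	while (va && (va < end)) {
		va = mapping_remove(pmap, va);		/* Unmap at va, get next mapped address (maybe ORed with 1) */
		va &= ~1ULL;				/* Strip the "not found" indicator bit */
	}
}
#endif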
  256 
  257 /*
  258  *              mapping_make(pmap, va, pa, flags, size, prot) - map a virtual address to a real one 
  259  *
  260  *              This routine takes the given parameters, builds a mapping block, and queues it into the 
  261  *              correct lists.
  262  *              
  263  *              pmap (virtual address)          is the pmap to map into
  264  *              va   (virtual address)          is the 64-bit virtual address that is being mapped
   265  *              pa      (physical page number)  is the physical page number (i.e., physical address >> 12). This is
  266  *                                                                      a 32-bit quantity.
  267  *              Flags:
  268  *                      block                                   if 1, mapping is a block, size parameter is used. Note: we do not keep 
  269  *                                                                      reference and change information or allow protection changes of blocks.
   270  *                                                                      Any changes must first unmap and then remap the area.
  271  *                      use attribute                   Use specified attributes for map, not defaults for physical page
  272  *                      perm                                    Mapping is permanent
  273  *                      cache inhibited                 Cache inhibited (used if use attribute or block set )
  274  *                      guarded                                 Guarded access (used if use attribute or block set )
  275  *              size                                            size of block in pages - 1 (not used if not block)
  276  *              prot                                            VM protection bits
  277  *              attr                                            Cachability/Guardedness    
  278  *
   279  *              Returns 0 (mapRtOK) if the mapping was successful.  Returns the vaddr that overlaps/collides
   280  *              (ORed with mapRtSmash) if an incompatible mapping already exists, and a non-zero mapRt* code for any other failure.
  281  *
   282  *              Note that we make an assumption that all memory in the range of 0x0000000080000000 to 0x00000000FFFFFFFF is reserved
   283  *              for I/O and default the cache attributes appropriately.  The caller is free to set whatever they want, however.
  284  *
  285  *              If there is any physical page that is not found in the physent table, the mapping is forced to be a
  286  *              block mapping of length 1.  This keeps us from trying to update a physent during later mapping use,
  287  *              e.g., fault handling.
  288  *
  289  *
  290  */
  291  
  292 addr64_t mapping_make(pmap_t pmap, addr64_t va, ppnum_t pa, unsigned int flags, unsigned int size, vm_prot_t prot) {    /* Make an address mapping */
  293 
  294         register mapping_t *mp;
  295         addr64_t colladdr, psmask;
  296         unsigned int pindex, mflags, pattr, wimg, rc;
  297         phys_entry_t *physent;
  298         int nlists, pcf;
  299 
  300         pindex = 0;
  301         
  302         mflags = 0x01000000;                                                                            /* Start building mpFlags field (busy count = 1) */
  303 
  304         pcf = (flags & mmFlgPcfg) >> 24;                                                        /* Get the physical page config index */
  305         if(!(pPcfg[pcf].pcfFlags)) {                                                            /* Validate requested physical page configuration */
  306                 panic("mapping_make: invalid physical page configuration request - pmap = %08X, va = %016llX, cfg = %d\n",
  307                         pmap, va, pcf);
  308         }
  309         
  310         psmask = (1ULL << pPcfg[pcf].pcfPSize) - 1;                                     /* Mask to isolate any offset into a page */
  311         if(va & psmask) {                                                                                       /* Make sure we are page aligned on virtual */
  312                 panic("mapping_make: attempt to map unaligned vaddr - pmap = %08X, va = %016llX, cfg = %d\n",
  313                         pmap, va, pcf);
  314         }
  315         if(((addr64_t)pa << 12) & psmask) {                                                     /* Make sure we are page aligned on physical */
  316                 panic("mapping_make: attempt to map unaligned paddr - pmap = %08X, pa = %016llX, cfg = %d\n",
  317                         pmap, pa, pcf);
  318         }
  319         
  320         mflags |= (pcf << (31-mpPcfgb));                                                        /* Insert physical page configuration index */
  321 
  322         if(!(flags & mmFlgBlock)) {                                                                     /* Is this a block map? */
  323 
  324                 size = 1;                                                                                               /* Set size to 1 page if not block */
  325          
  326                 physent = mapping_phys_lookup(pa, &pindex);                             /* Get physical entry */
  327                 if(!physent) {                                                                                  /* Did we find the physical page? */
  328                         mflags |= mpBlock;                                                                      /* Force this to a block if no physent */
  329                         pattr = 0;                                                                                      /* Assume normal, non-I/O memory */
  330                         if((pa & 0xFFF80000) == 0x00080000) pattr = mmFlgCInhib | mmFlgGuarded; /* If this page is in I/O range, set I/O attributes */
  331                 }
  332                 else pattr = ((physent->ppLink & (ppI | ppG)) >> 60);   /* Get the default attributes from physent */
  333                 
  334                 if(flags & mmFlgUseAttr) pattr = flags & (mmFlgCInhib | mmFlgGuarded);  /* Use requested attributes */
  335         }
  336         else {                                                                                                          /* This is a block */
  337                  
  338                 pattr = flags & (mmFlgCInhib | mmFlgGuarded);                   /* Use requested attributes */
  339                 mflags |= mpBlock;                                                                              /* Show that this is a block */
  340         
   341                 if(size > pmapSmallBlock) {                                                             /* Is it bigger than a small block? */
  342                         if(size & 0x00001FFF) return mapRtBadSz;                        /* Fail if bigger than 256MB and not a 32MB multiple */
  343                         size = size >> 13;                                                                      /* Convert to 32MB chunks */
  344                         mflags = mflags | mpBSu;                                                        /* Show 32MB basic size unit */
  345                 }
  346         }
  347         
  348         wimg = 0x2;                                                                                                     /* Set basic PPC wimg to 0b0010 - Coherent */
  349         if(pattr & mmFlgCInhib) wimg |= 0x4;                                            /* Add cache inhibited if we need to */
  350         if(pattr & mmFlgGuarded) wimg |= 0x1;                                           /* Add guarded if we need to */
  351         
  352         mflags = mflags | (pindex << 16);                                                       /* Stick in the physical entry table index */
  353         
  354         if(flags & mmFlgPerm) mflags |= mpPerm;                                         /* Set permanent mapping */
  355         
  356         size = size - 1;                                                                                        /* Change size to offset */
  357         if(size > 0xFFFF) return mapRtBadSz;                                            /* Leave if size is too big */
  358         
  359         nlists = mapSetLists(pmap);                                                                     /* Set number of lists this will be on */
  360         
  361         mp = mapping_alloc(nlists);                                                                     /* Get a spare mapping block with this many lists */
  362 
  363                                                                 /* the mapping is zero except that the mpLists field is set */
  364         mp->mpFlags |= mflags;                                                                          /* Add in the rest of the flags to mpLists */
  365         mp->mpSpace = pmap->space;                                                                      /* Set the address space/pmap lookup ID */
  366         mp->u.mpBSize = size;                                                                           /* Set the size */
  367         mp->mpPte = 0;                                                                                          /* Set the PTE invalid */
  368         mp->mpPAddr = pa;                                                                                       /* Set the physical page number */
  369         mp->mpVAddr = (va & ~mpHWFlags) | (wimg << 3)                           /* Add the protection and attributes to the field */
  370                 | ((PerProcTable[0].ppe_vaddr->pf.Available & pf64Bit)?
  371                         getProtPPC(prot) : (getProtPPC(prot) & 0x3));           /* Mask off no-execute control for 32-bit machines */                   
  372         
  373         while(1) {                                                                                                      /* Keep trying... */
  374                 colladdr = hw_add_map(pmap, mp);                                                /* Go add the mapping to the pmap */
  375                 rc = colladdr & mapRetCode;                                                             /* Separate return code */
  376                 colladdr &= ~mapRetCode;                                                                /* Clean up collision effective address */
  377                 
  378                 switch (rc) {
  379                         case mapRtOK:
  380                                 return mapRtOK;                                                                 /* Mapping added successfully */
  381                                 
  382                         case mapRtRemove:                                                                       /* Remove in progress */
  383                                 (void)mapping_remove(pmap, colladdr);                   /* Lend a helping hand to another CPU doing block removal */
  384                                 continue;                                                                               /* Retry mapping add */
  385                                 
  386                         case mapRtMapDup:                                                                       /* Identical mapping already present */
  387                                 mapping_free(mp);                                                               /* Free duplicate mapping */
  388                                 return mapRtOK;                                                                         /* Return success */
  389                                 
  390                         case mapRtSmash:                                                                        /* Mapping already present but does not match new mapping */
  391                                 mapping_free(mp);                                                               /* Free duplicate mapping */
  392                                 return (colladdr | mapRtSmash);                                 /* Return colliding address, with some dirt added to avoid
  393                                                                                                                                    confusion if effective address is 0 */
  394                         default:
  395                                 panic("mapping_make: hw_add_map failed - collision addr = %016llX, code = %02X, pmap = %08X, va = %016llX, mapping = %08X\n",
  396                                         colladdr, rc, pmap, va, mp);                            /* Die dead */
  397                 }
  398                 
  399         }
  400         
  401         return 1;                                                                                                       /* Unreachable, but pleases compiler */
  402 }
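
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a minimal single-page call of mapping_make() using default attributes and
 * the return conventions described above.  The wrapper is hypothetical.
 */
#if 0
static void map_one_page_example(pmap_t pmap, addr64_t va, ppnum_t pa) {
	addr64_t rc;

	rc = mapping_make(pmap, va, pa, 0, 1, VM_PROT_READ | VM_PROT_WRITE);	/* Not a block, so size is forced to 1 page */
	if (rc != mapRtOK) {
		/* rc is either a mapRt* failure code or a colliding vaddr ORed with mapRtSmash */
	}
}
#endif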
  403 
  404 
  405 /*
  406  *              mapping *mapping_find(pmap, va, *nextva, full) - Finds a mapping 
  407  *
  408  *              Looks up the vaddr and returns the mapping and the next mapped va
  409  *              If full is true, it will descend through all nested pmaps to find actual mapping
  410  *
  411  *              Must be called with interruptions disabled or we can hang trying to remove found mapping.
  412  *
   413  *              Returns 0 if not found, or a pointer to the mapping if it is found.
   414  *              Note that the mapping's busy count is bumped. It is the responsibility of the caller
  415  *              to drop the count.  If this is not done, any attempt to remove the mapping will hang.
  416  *
  417  *              NOTE: The nextva field is not valid when full is TRUE.
  418  *
  419  *
  420  */
  421  
   422 mapping_t *mapping_find(pmap_t pmap, addr64_t va, addr64_t *nextva, int full) { /* Find an address mapping */
  423 
  424         register mapping_t *mp;
  425         addr64_t        curva;
  426         pmap_t  curpmap;
  427         int     nestdepth;
  428 
  429         curpmap = pmap;                                                                                         /* Remember entry */
  430         nestdepth = 0;                                                                                          /* Set nest depth */
  431         curva = (addr64_t)va;                                                                           /* Set current va */
  432 
  433         while(1) {
  434 
  435                 mp = hw_find_map(curpmap, curva, nextva);                               /* Find the mapping for this address */
  436                 if((unsigned int)mp == mapRtBadLk) {                                    /* Did we lock up ok? */
  437                         panic("mapping_find: pmap lock failure - rc = %08X, pmap = %08X\n", mp, curpmap);       /* Die... */
  438                 }
  439                 
  440                 if(!mp || ((mp->mpFlags & mpType) < mpMinSpecial) || !full) break;              /* Are we done looking? */
  441 
  442                 if((mp->mpFlags & mpType) != mpNest) {                                  /* Don't chain through anything other than a nested pmap */
  443                         mapping_drop_busy(mp);                                                          /* We have everything we need from the mapping */
  444                         mp = 0;                                                                                         /* Set not found */
  445                         break;
  446                 }
  447 
  448                 if(nestdepth++ > 64) {                                                                  /* Have we nested too far down? */
  449                         panic("mapping_find: too many nested pmaps - va = %016llX, curva = %016llX, pmap = %08X, curpmap = %08X\n",
  450                                 va, curva, pmap, curpmap);
  451                 }
  452                 
  453                 curva = curva + mp->mpNestReloc;                                                /* Relocate va to new pmap */
  454                 curpmap = (pmap_t) pmapTrans[mp->mpSpace].pmapVAddr;    /* Get the address of the nested pmap */
  455                 mapping_drop_busy(mp);                                                                  /* We have everything we need from the mapping */
  456                 
  457         }
  458 
  459         return mp;                                                                                                      /* Return the mapping if we found one */
  460 }
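
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the calling discipline described above: look up a mapping, then drop the
 * busy count that mapping_find() bumps on the caller's behalf.  The helper
 * name is hypothetical.
 */
#if 0
static boolean_t va_is_mapped_example(pmap_t pmap, addr64_t va) {
	addr64_t   nextva;
	mapping_t *mp;

	mp = mapping_find(pmap, va, &nextva, 1);	/* Chase through nested pmaps to the real mapping */
	if (!mp) return FALSE;				/* Nothing mapped at va */

	mapping_drop_busy(mp);				/* Caller must drop the busy count or removal will hang */
	return TRUE;
}
#endif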
  461 
  462 /*
   463  *              void mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva) - change the protection of a virtual page
  464  *
  465  *              This routine takes a pmap and virtual address and changes
  466  *              the protection.  If there are PTEs associated with the mappings, they will be invalidated before
  467  *              the protection is changed. 
  468  *
   469  *              The protection is changed if there is a page mapped at va; if there is not, nothing happens.  The request
   470  *              is ignored if the va corresponds to a block mapped area or the mapping is permanent.
  471  *
  472  *
  473  */
  474 
  475 void
  476 mapping_protect(pmap_t pmap, addr64_t va, vm_prot_t prot, addr64_t *nextva) {   /* Change protection of a virtual page */
  477 
  478         int     ret;
  479         
  480         ret = hw_protect(pmap, va, getProtPPC(prot), nextva);   /* Try to change the protect here */
  481 
  482         switch (ret) {                                                          /* Decode return code */
  483         
  484                 case mapRtOK:                                                   /* Changed */
  485                 case mapRtNotFnd:                                               /* Didn't find it */
  486                 case mapRtBlock:                                                /* Block map, just ignore request */
  487                 case mapRtNest:                                                 /* Nested pmap, just ignore request */
  488                         break;
  489                         
  490                 default:
  491                         panic("mapping_protect: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret, pmap, va);
  492                 
  493         }
  494 
  495 }
  496 
  497 /*
  498  *              void mapping_protect_phys(ppnum_t pa, vm_prot_t prot) - change the protection of a physical page
  499  *
  500  *              This routine takes a physical entry and runs through all mappings attached to it and changes
  501  *              the protection.  If there are PTEs associated with the mappings, they will be invalidated before
  502  *              the protection is changed.  There is no limitation on changes, e.g., 
  503  *              higher to lower, lower to higher.
  504  *
  505  *              Any mapping that is marked permanent is not changed
  506  *
  507  *              Phys_entry is unlocked.
  508  */
  509 
  510 void mapping_protect_phys(ppnum_t pa, vm_prot_t prot) { /* Change protection of all mappings to page */
  511         
  512         unsigned int pindex;
  513         phys_entry_t *physent;
  514         
  515         physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
  516         if(!physent) {                                                                                          /* Did we find the physical page? */
  517                 panic("mapping_protect_phys: invalid physical page %08X\n", pa);
  518         }
  519 
  520         hw_walk_phys(physent, hwpNoop, hwpSPrtMap, hwpNoop,
  521                      getProtPPC(prot), hwpPurgePTE);                            /* Set the new protection for page and mappings */
  522 
  523         return;                                                                                                         /* Leave... */
  524 }
  525 
  526 
  527 /*
  528  *              void mapping_clr_mod(ppnum_t pa) - clears the change bit of a physical page
  529  *
  530  *              This routine takes a physical entry and runs through all mappings attached to it and turns
  531  *              off the change bit. 
  532  */
  533 
  534 void mapping_clr_mod(ppnum_t pa) {                                                              /* Clears the change bit of a physical page */
  535 
  536         unsigned int pindex;
  537         phys_entry_t *physent;
  538         
  539         physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
  540         if(!physent) {                                                                                          /* Did we find the physical page? */
  541                 panic("mapping_clr_mod: invalid physical page %08X\n", pa);
  542         }
  543 
  544         hw_walk_phys(physent, hwpNoop, hwpCCngMap, hwpCCngPhy,
  545                                  0, hwpPurgePTE);                                                               /* Clear change for page and mappings */
  546         return;                                                                                                         /* Leave... */
  547 }
  548 
  549 
  550 /*
  551  *              void mapping_set_mod(ppnum_t pa) - set the change bit of a physical page
  552  *
  553  *              This routine takes a physical entry and runs through all mappings attached to it and turns
  554  *              on the change bit.  
  555  */
  556 
  557 void mapping_set_mod(ppnum_t pa) {                                                              /* Sets the change bit of a physical page */
  558 
  559         unsigned int pindex;
  560         phys_entry_t *physent;
  561         
  562         physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
  563         if(!physent) {                                                                                          /* Did we find the physical page? */
  564                 panic("mapping_set_mod: invalid physical page %08X\n", pa);
  565         }
  566 
  567         hw_walk_phys(physent, hwpNoop, hwpSCngMap, hwpSCngPhy,
  568                                  0, hwpNoopPTE);                                                                /* Set change for page and mappings */
  569         return;                                                                                                         /* Leave... */
  570 }
  571 
  572 
  573 /*
  574  *              void mapping_clr_ref(ppnum_t pa) - clears the reference bit of a physical page
  575  *
  576  *              This routine takes a physical entry and runs through all mappings attached to it and turns
  577  *              off the reference bit.  
  578  */
  579 
  580 void mapping_clr_ref(ppnum_t pa) {                                                              /* Clears the reference bit of a physical page */
  581 
  582         unsigned int pindex;
  583         phys_entry_t *physent;
  584         
  585         physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
  586         if(!physent) {                                                                                          /* Did we find the physical page? */
  587                 panic("mapping_clr_ref: invalid physical page %08X\n", pa);
  588         }
  589 
  590         hw_walk_phys(physent, hwpNoop, hwpCRefMap, hwpCRefPhy,
  591                                  0, hwpPurgePTE);                                                               /* Clear reference for page and mappings */
  592         return;                                                                                                         /* Leave... */
  593 }
  594 
  595 
  596 /*
  597  *              void mapping_set_ref(ppnum_t pa) - set the reference bit of a physical page
  598  *
  599  *              This routine takes a physical entry and runs through all mappings attached to it and turns
  600  *              on the reference bit. 
  601  */
  602 
  603 void mapping_set_ref(ppnum_t pa) {                                                              /* Sets the reference bit of a physical page */
  604 
  605         unsigned int pindex;
  606         phys_entry_t *physent;
  607         
  608         physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
  609         if(!physent) {                                                                                          /* Did we find the physical page? */
  610                 panic("mapping_set_ref: invalid physical page %08X\n", pa);
  611         }
  612 
  613         hw_walk_phys(physent, hwpNoop, hwpSRefMap, hwpSRefPhy,
  614                                  0, hwpNoopPTE);                                                                /* Set reference for page and mappings */
  615         return;                                                                                                         /* Leave... */
  616 }
  617 
  618 
  619 /*
  620  *              boolean_t mapping_tst_mod(ppnum_t pa) - test the change bit of a physical page
  621  *
  622  *              This routine takes a physical entry and runs through all mappings attached to it and tests
   623  *              the change bit.
  624  */
  625 
  626 boolean_t mapping_tst_mod(ppnum_t pa) {                                                 /* Tests the change bit of a physical page */
  627 
  628         unsigned int pindex, rc;
  629         phys_entry_t *physent;
  630         
  631         physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
  632         if(!physent) {                                                                                          /* Did we find the physical page? */
  633                 panic("mapping_tst_mod: invalid physical page %08X\n", pa);
  634         }
  635 
  636         rc = hw_walk_phys(physent, hwpTCngPhy, hwpTCngMap, hwpNoop,
   637                                           0, hwpMergePTE);                                                      /* Test change for page and mappings */
  638         return ((rc & (unsigned long)ppC) != 0);                                        /* Leave with change bit */
  639 }
  640 
  641 
  642 /*
  643  *              boolean_t mapping_tst_ref(ppnum_t pa) - tests the reference bit of a physical page
  644  *
  645  *              This routine takes a physical entry and runs through all mappings attached to it and tests
  646  *              the reference bit. 
  647  */
  648 
  649 boolean_t mapping_tst_ref(ppnum_t pa) {                                                 /* Tests the reference bit of a physical page */
  650 
  651         unsigned int pindex, rc;
  652         phys_entry_t *physent;
  653         
  654         physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
  655         if(!physent) {                                                                                          /* Did we find the physical page? */
  656                 panic("mapping_tst_ref: invalid physical page %08X\n", pa);
  657         }
  658 
  659         rc = hw_walk_phys(physent, hwpTRefPhy, hwpTRefMap, hwpNoop,
  660                           0, hwpMergePTE);                                                      /* Test reference for page and mappings */
  661         return ((rc & (unsigned long)ppR) != 0);                                        /* Leave with reference bit */
  662 }
  663 
  664 
  665 /*
  666  *              unsigned int mapping_tst_refmod(ppnum_t pa) - tests the reference and change bits of a physical page
  667  *
  668  *              This routine takes a physical entry and runs through all mappings attached to it and tests
   669  *              their reference and change bits.
  670  */
  671 
  672 unsigned int mapping_tst_refmod(ppnum_t pa) {                                   /* Tests the reference and change bits of a physical page */
  673         
  674         unsigned int  pindex, rc;
  675         phys_entry_t *physent;
  676         
  677         physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
  678         if (!physent) {                                                                                         /* Did we find the physical page? */
  679                 panic("mapping_tst_refmod: invalid physical page %08X\n", pa);
  680         }
  681 
  682         rc = hw_walk_phys(physent, hwpTRefCngPhy, hwpTRefCngMap, hwpNoop,
  683                                           0, hwpMergePTE);                                                      /* Test reference and change bits in page and mappings */
  684         return (((rc & ppC)? VM_MEM_MODIFIED : 0) | ((rc & ppR)? VM_MEM_REFERENCED : 0));
  685                                                                                                                                 /* Convert bits to generic format and return */
  686         
  687 }
  688 
  689 
  690 /*
  691  *              void mapping_clr_refmod(ppnum_t pa, unsigned int mask) - clears the reference and change bits specified
  692  *        by mask of a physical page
  693  *
   694  *              This routine takes a physical entry and runs through all mappings attached to it and turns
   695  *              off the reference and change bits selected by the mask.
  696  */
  697 
  698 void mapping_clr_refmod(ppnum_t pa, unsigned int mask) {                /* Clears the reference and change bits of a physical page */
  699 
  700         unsigned int  pindex;
  701         phys_entry_t *physent;
  702         unsigned int  ppcMask;
  703         
  704         physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
  705         if(!physent) {                                                                                          /* Did we find the physical page? */
  706                 panic("mapping_clr_refmod: invalid physical page %08X\n", pa);
  707         }
  708 
  709         ppcMask = (((mask & VM_MEM_MODIFIED)? ppC : 0) | ((mask & VM_MEM_REFERENCED)? ppR : 0));
  710                                                                                                                                 /* Convert mask bits to PPC-specific format */
  711         hw_walk_phys(physent, hwpNoop, hwpCRefCngMap, hwpCRefCngPhy,
  712                      ppcMask, hwpPurgePTE);                                                     /* Clear reference and change bits for page and mappings */
  713         return;                                                                                                         /* Leave... */
  714 }
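
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * clearing only the modified state of a page while leaving the referenced
 * state alone.  The mask uses the generic VM_MEM_* bits, which the routine
 * converts to the PPC-specific ppC/ppR bits internally.
 */
#if 0
static void page_clean_example(ppnum_t pa) {
	if (mapping_tst_mod(pa))				/* Was the page modified? */
		mapping_clr_refmod(pa, VM_MEM_MODIFIED);	/* Clear just the change bit */
}
#endif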
  715 
  716 
  717 
  718 /*
   719  *              phys_ent  *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex) - finds the phys_entry for a physical page
  720  *
  721  *              This routine takes a physical page number and returns the phys_entry associated with it.  It also
   722  *              calculates the word index of the memory bank containing the entry, returning it through pindex.
  724  */
  725 
  726 phys_entry_t *mapping_phys_lookup(ppnum_t pp, unsigned int *pindex) {   /* Finds the physical entry for the page */
  727 
  728         int i;
  729         
  730         for(i = 0; i < pmap_mem_regions_count; i++) {                           /* Walk through the list */
  731                 if(!(unsigned int)pmap_mem_regions[i].mrPhysTab) continue;      /* Skip any empty lists */
  732                 if((pp < pmap_mem_regions[i].mrStart) || (pp > pmap_mem_regions[i].mrEnd)) continue;    /* This isn't ours */
  733                 
  734                 *pindex = (i * sizeof(mem_region_t)) / 4;                               /* Make the word index to this list */
  735                 
  736                 return &pmap_mem_regions[i].mrPhysTab[pp - pmap_mem_regions[i].mrStart];        /* Return the physent pointer */
  737         }
  738         
  739         return (phys_entry_t *)0;                                                                               /* Shucks, can't find it... */
  740         
  741 }
  742 
  743 
  744 
  745 
  746 /*
  747  *              mapping_adjust(void) - Releases free mapping blocks and/or allocates new ones 
  748  *
  749  *              This routine frees any mapping blocks queued to mapCtl.mapcrel. It also checks
  750  *              the number of free mappings remaining, and if below a threshold, replenishes them.
   751  *              The list will be replenished from mapCtl.mapcrel if there are enough.  Otherwise,
  752  *              a new one is allocated.
  753  *
  754  *              This routine allocates and/or frees memory and must be called from a safe place. 
  755  *              Currently, vm_pageout_scan is the safest place. 
  756  */
  757 
  758 thread_call_t                           mapping_adjust_call;
  759 static thread_call_data_t       mapping_adjust_call_data;
  760 
  761 void mapping_adjust(void) {                                                                             /* Adjust free mappings */
  762 
  763         kern_return_t   retr = KERN_SUCCESS;
  764         mappingblok_t   *mb, *mbn;
  765         spl_t                   s;
  766         int                             allocsize;
  767 
  768         if(mapCtl.mapcmin <= MAPPERBLOK) {
  769                 mapCtl.mapcmin = (sane_size / PAGE_SIZE) / 16;
  770 
  771 #if DEBUG
  772                 kprintf("mapping_adjust: minimum entries rqrd = %08X\n", mapCtl.mapcmin);
  773                 kprintf("mapping_adjust: free = %08X; in use = %08X; release = %08X\n",
  774                   mapCtl.mapcfree, mapCtl.mapcinuse, mapCtl.mapcreln);
  775 #endif
  776         }
  777 
  778         s = splhigh();                                                                                          /* Don't bother from now on */
  779         if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
  780                 panic("mapping_adjust - timeout getting control lock (1)\n");   /* Tell all and die */
  781         }
  782         
  783         if (mapping_adjust_call == NULL) {
  784                 thread_call_setup(&mapping_adjust_call_data, 
  785                                   (thread_call_func_t)mapping_adjust, 
  786                                   (thread_call_param_t)NULL);
  787                 mapping_adjust_call = &mapping_adjust_call_data;
  788         }
  789 
  790         while(1) {                                                                                                      /* Keep going until we've got enough */
  791                 
  792                 allocsize = mapCtl.mapcmin - mapCtl.mapcfree;                   /* Figure out how much we need */
  793                 if(allocsize < 1) break;                                                                /* Leave if we have all we need */
  794                 
  795                 if((unsigned int)(mbn = mapCtl.mapcrel)) {                              /* Can we rescue a free one? */
  796                         mapCtl.mapcrel = mbn->nextblok;                                         /* Dequeue it */
  797                         mapCtl.mapcreln--;                                                                      /* Back off the count */
  798                         allocsize = MAPPERBLOK;                                                         /* Show we allocated one block */                       
  799                 }
  800         else {                                                                                                  /* No free ones, try to get it */
  801                         
  802                         allocsize = (allocsize + MAPPERBLOK - 1) / MAPPERBLOK;  /* Get the number of pages we need */
  803                         
  804                         hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);            /* Unlock our stuff */
  805                         splx(s);                                                                                        /* Restore 'rupts */
  806 
  807                         for(; allocsize > 0; allocsize >>= 1) {                         /* Try allocating in descending halves */ 
  808                                 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE * allocsize);       /* Find a virtual address to use */
  809                                 if((retr != KERN_SUCCESS) && (allocsize == 1)) {        /* Did we find any memory at all? */
  810                                         break;
  811                                 }
  812                                 if(retr == KERN_SUCCESS) break;                                 /* We got some memory, bail out... */
  813                         }
  814                 
  815                         allocsize = allocsize * MAPPERBLOK;                                     /* Convert pages to number of maps allocated */
  816                         s = splhigh();                                                                          /* Don't bother from now on */
  817                         if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
  818                                 panic("mapping_adjust - timeout getting control lock (2)\n");   /* Tell all and die */
  819                         }
  820                 }
  821 
  822                 if (retr != KERN_SUCCESS)
   823                         break;                                                                                          /* Failed to allocate, bail out... */
  824                 for(; allocsize > 0; allocsize -= MAPPERBLOK) {                 /* Release one block at a time */
  825                         mapping_free_init((vm_offset_t)mbn, 0, 1);                      /* Initialize a non-permanent block */
  826                         mbn = (mappingblok_t *)((unsigned int)mbn + PAGE_SIZE); /* Point to the next slot */
  827                 }
  828 
  829                 if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
  830                         mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
  831         }
  832 
  833         if(mapCtl.mapcholdoff) {                                                                        /* Should we hold off this release? */
  834                 mapCtl.mapcrecurse = 0;                                                                 /* We are done now */
  835                 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);                    /* Unlock our stuff */
  836                 splx(s);                                                                                                /* Restore 'rupts */
  837                 return;                                                                                                 /* Return... */
  838         }
  839 
  840         mbn = mapCtl.mapcrel;                                                                           /* Get first pending release block */
  841         mapCtl.mapcrel = 0;                                                                                     /* Dequeue them */
  842         mapCtl.mapcreln = 0;                                                                            /* Set count to 0 */
  843 
  844         hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);                            /* Unlock our stuff */
  845         splx(s);                                                                                                        /* Restore 'rupts */
  846 
  847         while((unsigned int)mbn) {                                                                      /* Toss 'em all */
  848                 mb = mbn->nextblok;                                                                             /* Get the next */
  849                 
  850                 kmem_free(mapping_map, (vm_offset_t) mbn, PAGE_SIZE);   /* Release this mapping block */
  851         
  852                 mbn = mb;                                                                                               /* Chain to the next */
  853         }
  854 
  855         __asm__ volatile("eieio");                                                                      /* Make sure all is well */
  856         mapCtl.mapcrecurse = 0;                                                                         /* We are done now */
  857         return;
  858 }
  859 
  860 /*
  861  *              mapping_free(mapping *mp) - release a mapping to the free list 
  862  *
  863  *              This routine takes a mapping and adds it to the free list.
   864  *              If this mapping makes the block non-empty, we queue it to the free block list.
   865  *              NOTE: we might want to queue it to the end to quell the pathological
   866  *              case where we repeatedly get a mapping and free it, causing the block to chain and unchain.
  867  *              If this release fills a block and we are above the threshold, we release the block
  868  */
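
/*
 *              Illustrative sketch (not part of the original source): each mapping block is one
 *              page-aligned page carved into 64-byte slots, so both the owning block and the slot
 *              index can be recovered from the mapping pointer alone, exactly as the first two
 *              lines of mapping_free() below do.  The helper names are hypothetical and the sketch
 *              assumes a 4K page size.
 */
#if 0
static unsigned int map_slot_index(struct mapping *mp) {                /* Which 64-byte slot within the page? */
        return ((unsigned int)mp & (PAGE_SIZE - 1)) >> 6;               /* Offset within the page, divided by 64 */
}

static mappingblok_t *map_owning_block(struct mapping *mp) {            /* Which block owns this mapping? */
        return (mappingblok_t *)((unsigned int)mp & -PAGE_SIZE);        /* Round the pointer down to its page */
}
#endif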
  869 
  870 void mapping_free(struct mapping *mp) {                                                 /* Release a mapping */
  871 
  872         mappingblok_t   *mb, *mbn;
  873         spl_t                   s;
  874         unsigned int    full, mindx, lists;
  875 
  876         mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 6;                      /* Get index to mapping */
  877         mb = (mappingblok_t *)((unsigned int)mp & -PAGE_SIZE);          /* Point to the mapping block */
  878     lists = (mp->mpFlags & mpLists);                                                    /* get #lists */
  879     if ((lists == 0) || (lists > kSkipListMaxLists))                    /* panic if out of range */
  880         panic("mapping_free: mpLists invalid\n");
  881 
  882 #if 0
  883         mp->mpFlags = 0x99999999;                                                                       /* (BRINGUP) */ 
  884         mp->mpSpace = 0x9999;                                                                           /* (BRINGUP) */ 
  885         mp->u.mpBSize = 0x9999;                                                                         /* (BRINGUP) */ 
  886         mp->mpPte   = 0x99999998;                                                                       /* (BRINGUP) */ 
  887         mp->mpPAddr = 0x99999999;                                                                       /* (BRINGUP) */ 
  888         mp->mpVAddr = 0x9999999999999999ULL;                                            /* (BRINGUP) */ 
  889         mp->mpAlias = 0x9999999999999999ULL;                                            /* (BRINGUP) */ 
  890         mp->mpList0 = 0x9999999999999999ULL;                                            /* (BRINGUP) */ 
  891         mp->mpList[0] = 0x9999999999999999ULL;                                          /* (BRINGUP) */ 
  892         mp->mpList[1] = 0x9999999999999999ULL;                                          /* (BRINGUP) */ 
  893         mp->mpList[2] = 0x9999999999999999ULL;                                          /* (BRINGUP) */ 
  894 
  895         if(lists > mpBasicLists) {                                                                      /* (BRINGUP) */ 
  896                 mp->mpList[3] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
  897                 mp->mpList[4] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
  898                 mp->mpList[5] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
  899                 mp->mpList[6] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
  900                 mp->mpList[7] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
  901                 mp->mpList[8] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
  902                 mp->mpList[9] = 0x9999999999999999ULL;                                  /* (BRINGUP) */ 
  903                 mp->mpList[10] = 0x9999999999999999ULL;                                 /* (BRINGUP) */ 
  904         }
  905 #endif  
  906         
  907 
  908         s = splhigh();                                                                                          /* Don't bother from now on */
  909         if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
  910                 panic("mapping_free - timeout getting control lock\n"); /* Tell all and die */
  911         }
  912         
  913         full = !(mb->mapblokfree[0] | mb->mapblokfree[1]);                      /* See if full now */ 
  914         mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31));    /* Flip on the free bit */
   915     if ( lists > mpBasicLists ) {                                                               /* if big block, light the 2nd bit too */
  916         mindx++;
  917         mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31));
  918         mapCtl.mapcfree++;
  919         mapCtl.mapcinuse--;
  920     }
  921         
  922         if(full) {                                                                                                      /* If it was full before this: */
  923                 mb->nextblok = mapCtl.mapcnext;                                                 /* Move head of list to us */
  924                 mapCtl.mapcnext = mb;                                                                   /* Chain us to the head of the list */
  925                 if(!((unsigned int)mapCtl.mapclast))
  926                         mapCtl.mapclast = mb;
  927         }
  928 
  929         mapCtl.mapcfree++;                                                                                      /* Bump free count */
   930         mapCtl.mapcinuse--;                                                                                     /* Decrement in use count */
  931         
  932         mapCtl.mapcfreec++;                                                                                     /* Count total calls */
  933 
  934         if(mapCtl.mapcfree > mapCtl.mapcmin) {                                          /* Should we consider releasing this? */
  935                 if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1]) == 0xFFFFFFFF) {    /* See if empty now */ 
  936 
  937                         if(mapCtl.mapcnext == mb) {                                                     /* Are we first on the list? */
  938                                 mapCtl.mapcnext = mb->nextblok;                                 /* Unchain us */
  939                                 if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0;       /* If last, remove last */
  940                         }
  941                         else {                                                                                          /* We're not first */
  942                                 for(mbn = mapCtl.mapcnext; mbn != 0; mbn = mbn->nextblok) {     /* Search for our block */
   943                                         if(mbn->nextblok == mb) break;                          /* Is the next one ours? */
  944                                 }
  945                                 if(!mbn) panic("mapping_free: attempt to release mapping block (%08X) not on list\n", mp);
  946                                 mbn->nextblok = mb->nextblok;                                   /* Dequeue us */
  947                                 if(mapCtl.mapclast == mb) mapCtl.mapclast = mbn;        /* If last, make our predecessor last */
  948                         }
  949                         
  950                         if(mb->mapblokflags & mbPerm) {                                         /* Is this permanently assigned? */
  951                                 mb->nextblok = mapCtl.mapcnext;                                 /* Move chain head to us */
  952                                 mapCtl.mapcnext = mb;                                                   /* Chain us to the head */
  953                                 if(!((unsigned int)mb->nextblok)) mapCtl.mapclast = mb; /* If last, make us so */
  954                         }
  955                         else {
  956                                 mapCtl.mapcfree -= MAPPERBLOK;                                  /* Remove the block from the free count */
  957                                 mapCtl.mapcreln++;                                                              /* Count on release list */
  958                                 mb->nextblok = mapCtl.mapcrel;                                  /* Move pointer */
  959                                 mapCtl.mapcrel = mb;                                                    /* Chain us in front */
  960                         }
  961                 }
  962         }
  963 
  964         if(mapCtl.mapcreln > MAPFRTHRSH) {                                                      /* Do we have way too many releasable mappings? */
  965                 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {   /* Make sure we aren't recursing */
  966                         thread_call_enter(mapping_adjust_call);                         /* Go toss some */
  967                 }
  968         }
  969         hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);                            /* Unlock our stuff */
  970         splx(s);                                                                                                        /* Restore 'rupts */
  971 
  972         return;                                                                                                         /* Bye, dude... */
  973 }
  974 
  975 
  976 /*
  977  *              mapping_alloc(lists) - obtain a mapping from the free list 
  978  *
  979  *              This routine takes a mapping off of the free list and returns its address.
  980  *              The mapping is zeroed, and its mpLists count is set.  The caller passes in
  981  *              the number of skiplists it would prefer; if this number is greater than 
   982  *              mpBasicLists (i.e., 4) then we need to allocate a 128-byte mapping, which is
   983  *              just two consecutive free entries coalesced into one.  If we cannot find
   984  *              two consecutive free entries, we clamp the list count down to mpBasicLists
  985  *              and return a basic 64-byte node.  Our caller never knows the difference.
  986  *
  987  *              If this allocation empties a block, we remove it from the free list.
  988  *              If this allocation drops the total number of free entries below a threshold,
  989  *              we allocate a new block.
  990  *
  991  */
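
/*
 *              Illustrative sketch (not part of the original source): each block carries a two-word
 *              free bit map with one bit per 64-byte slot (a set bit means the slot is free; bit 0
 *              is reserved for the block header).  A 128-byte mapping is just two adjacent free
 *              slots, so a "big" request needs two consecutive set bits.  mapalc1/mapalc2 perform
 *              the real search elsewhere; a test for one hypothetical candidate index could be
 *              written as:
 */
#if 0
static int slots_free_for_big(mappingblok_t *mb, unsigned int mindx) {  /* Are slots mindx and mindx+1 both free? */
        unsigned int lo, hi;
        if ((mindx == 0) || (mindx + 1 > MAPPERBLOK)) return 0;         /* Slot 0 is the header; there is no slot past the last */
        lo = mb->mapblokfree[mindx >> 5] & (0x80000000 >> (mindx & 31));
        hi = mb->mapblokfree[(mindx + 1) >> 5] & (0x80000000 >> ((mindx + 1) & 31));
        return (lo != 0) && (hi != 0);                                  /* Both bits set means a 128-byte node fits here */
}
#endif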
  992 decl_simple_lock_data(extern,free_pmap_lock)
  993 
  994 mapping_t *
  995 mapping_alloc(int lists) {                                                              /* Obtain a mapping */
  996 
  997         register mapping_t *mp;
  998         mappingblok_t   *mb, *mbn;
  999         spl_t                   s;
 1000         int                             mindx;
 1001     int                         big = (lists > mpBasicLists);                           /* set flag if big block req'd */
 1002         pmap_t                  refpmap, ckpmap;
 1003         unsigned int    space, i;
 1004         addr64_t                va, nextva;
 1005         boolean_t               found_mapping;
 1006         boolean_t               do_rescan;
 1007     
 1008         s = splhigh();                                                                                          /* Don't bother from now on */
 1009         if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
 1010                 panic("mapping_alloc - timeout getting control lock\n");        /* Tell all and die */
 1011         }
 1012 
 1013         if(!((unsigned int)mapCtl.mapcnext)) {                                          /* Are there any free mappings? */
 1014         
 1015 /*
 1016  *              No free mappings.  First, there may be some mapping blocks on the "to be released"
  1017  *              list.  If so, rescue one.  Otherwise, try to steal a couple of blocks' worth.
 1018  */
 1019 
 1020                 if((mbn = mapCtl.mapcrel) != 0) {                                               /* Try to rescue a block from impending doom */
 1021                         mapCtl.mapcrel = mbn->nextblok;                                         /* Pop the queue */
 1022                         mapCtl.mapcreln--;                                                                      /* Back off the count */
 1023                         mapping_free_init((vm_offset_t)mbn, 0, 1);                      /* Initialize a non-permanent block */
 1024                         goto rescued;
 1025                 }
 1026 
 1027                 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
 1028 
 1029                 simple_lock(&free_pmap_lock);
 1030 
 1031                 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
 1032                         panic("mapping_alloc - timeout getting control lock\n");        /* Tell all and die */
 1033                 }
 1034 
 1035                 if (!((unsigned int)mapCtl.mapcnext)) {
 1036 
 1037                         refpmap = (pmap_t)cursor_pmap->pmap_link.next;
 1038                         space = mapCtl.mapcflush.spacenum;
 1039                         while (refpmap != cursor_pmap) {
 1040                                 if(((pmap_t)(refpmap->pmap_link.next))->spaceNum > space) break;
 1041                                 refpmap = (pmap_t)refpmap->pmap_link.next;
 1042                         }
 1043 
 1044                         ckpmap = refpmap;
 1045                         va = mapCtl.mapcflush.addr;
 1046                         found_mapping = FALSE;
 1047 
 1048                         while (mapCtl.mapcfree <= (MAPPERBLOK*2)) {
 1049 
 1050                                 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);
 1051 
 1052                                 ckpmap = (pmap_t)ckpmap->pmap_link.next;
 1053 
 1054                                 /* We don't steal mappings from the kernel pmap, a VMM host pmap, or a VMM guest pmap with guest
 1055                                    shadow assist active.
 1056                                  */
 1057                                 if ((ckpmap->stats.resident_count != 0) && (ckpmap != kernel_pmap)
 1058                                                                                                                 && !(ckpmap->pmapFlags & (pmapVMgsaa|pmapVMhost))) {
 1059                                         do_rescan = TRUE;
 1060                                         for (i=0;i<8;i++) {
 1061                                                 mp = hw_purge_map(ckpmap, va, &nextva);
 1062 
 1063                                                 switch ((unsigned int)mp & mapRetCode) {
 1064                                                         case mapRtOK:
 1065                                                                 mapping_free(mp);
 1066                                                                 found_mapping = TRUE;
 1067                                                                 break;
 1068                                                         case mapRtNotFnd:
 1069                                                                 break;
 1070                                                         default:
 1071                                                                 panic("mapping_alloc: hw_purge_map failed - pmap = %08X, va = %16llX, code = %08X\n", ckpmap, va, mp);
 1072                                                                 break;
 1073                                                 }
 1074 
 1075                                                 if (mapRtNotFnd == ((unsigned int)mp & mapRetCode)) 
 1076                                                         if (do_rescan)
 1077                                                                 do_rescan = FALSE;
 1078                                                         else
 1079                                                                 break;
 1080 
 1081                                                 va = nextva;
 1082                                         }
 1083                                 }
 1084 
 1085                                 if (ckpmap == refpmap) {
 1086                                         if (found_mapping == FALSE)
 1087                                                 panic("no valid pmap to purge mappings\n");
 1088                                         else
 1089                                                 found_mapping = FALSE;
 1090                                 }
 1091 
 1092                                 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
 1093                                         panic("mapping_alloc - timeout getting control lock\n");        /* Tell all and die */
 1094                                 }
 1095 
 1096                         }
 1097 
 1098                         mapCtl.mapcflush.spacenum = ckpmap->spaceNum;
 1099                         mapCtl.mapcflush.addr = nextva;
 1100                 }
 1101 
 1102                 simple_unlock(&free_pmap_lock);
 1103         }
 1104 
 1105 rescued:
 1106 
 1107         mb = mapCtl.mapcnext;
 1108     
 1109     if ( big ) {                                                                                                /* if we need a big (128-byte) mapping */
 1110         mapCtl.mapcbig++;                                                                               /* count attempts to allocate a big mapping */
 1111         mbn = NULL;                                                                                             /* this will be prev ptr */
 1112         mindx = 0;
 1113         while( mb ) {                                                                                   /* loop over mapping blocks with free entries */
  1114             mindx = mapalc2(mb);                                                                /* try for 2 consecutive free bits in this block */
 1115 
 1116            if ( mindx ) break;                                                                  /* exit loop if we found them */
 1117             mbn = mb;                                                                                   /* remember previous block */
 1118             mb = mb->nextblok;                                                                  /* move on to next block */
 1119         }
  1120         if ( mindx == 0 ) {                                                                             /* if we couldn't find 2 consecutive bits... */
 1121             mapCtl.mapcbigfails++;                                                              /* count failures */
 1122             big = 0;                                                                                    /* forget that we needed a big mapping */
 1123             lists = mpBasicLists;                                                               /* clamp list count down to the max in a 64-byte mapping */
 1124             mb = mapCtl.mapcnext;                                                               /* back to the first block with a free entry */
 1125         }
 1126         else {                                                                                                  /* if we did find a big mapping */
  1127             mapCtl.mapcfree--;                                                                  /* Extra decrement: a big mapping uses two slots */
  1128             mapCtl.mapcinuse++;                                                                 /* Extra bump for the second slot */
 1129             if ( mindx < 0 ) {                                                                  /* if we just used the last 2 free bits in this block */
 1130                 if (mbn) {                                                                              /* if this wasn't the first block */
 1131                     mindx = -mindx;                                                             /* make positive */
 1132                     mbn->nextblok = mb->nextblok;                               /* unlink this one from the middle of block list */
 1133                     if (mb ==  mapCtl.mapclast) {                               /* if we emptied last block */
 1134                         mapCtl.mapclast = mbn;                                  /* then prev block is now last */
 1135                     }
 1136                 }
 1137             }
 1138         }
 1139     }
 1140     
 1141     if ( !big ) {                                                                                               /* if we need a small (64-byte) mapping */
 1142         if(!(mindx = mapalc1(mb)))                                                              /* Allocate a 1-bit slot */
 1143             panic("mapping_alloc - empty mapping block detected at %08X\n", mb);
 1144     }
 1145         
 1146         if(mindx < 0) {                                                                                         /* Did we just take the last one */
 1147                 mindx = -mindx;                                                                                 /* Make positive */
 1148                 mapCtl.mapcnext = mb->nextblok;                                                 /* Remove us from the list */
 1149                 if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0;       /* Removed the last one */
 1150         }
 1151         
 1152         mapCtl.mapcfree--;                                                                                      /* Decrement free count */
 1153         mapCtl.mapcinuse++;                                                                                     /* Bump in use count */
 1154         
 1155         mapCtl.mapcallocc++;                                                                            /* Count total calls */
 1156 
 1157 /*
 1158  *      Note: in the following code, we will attempt to rescue blocks only one at a time.
 1159  *      Eventually, after a few more mapping_alloc calls, we will catch up.  If there are none
  1160  *      rescuable, we will kick the misc scan, which will allocate some for us.  We only do this
 1161  *      if we haven't already done it.
 1162  *      For early boot, we are set up to only rescue one block at a time.  This is because we prime
 1163  *      the release list with as much as we need until threads start.
 1164  */
 1165 
 1166         if(mapCtl.mapcfree < mapCtl.mapcmin) {                                          /* See if we need to replenish */
 1167                 if((mbn = mapCtl.mapcrel) != 0) {                                               /* Try to rescue a block from impending doom */
 1168                         mapCtl.mapcrel = mbn->nextblok;                                         /* Pop the queue */
 1169                         mapCtl.mapcreln--;                                                                      /* Back off the count */
 1170                         mapping_free_init((vm_offset_t)mbn, 0, 1);                      /* Initialize a non-permanent block */
 1171                 }
 1172                 else {                                                                                                  /* We need to replenish */
 1173                         if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
 1174                                 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {   /* Make sure we aren't recursing */
 1175                                         thread_call_enter(mapping_adjust_call);                 /* Go allocate some more */
 1176                                 }
 1177                         }
 1178                 }
 1179         }
 1180 
 1181         hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);                            /* Unlock our stuff */
 1182         splx(s);                                                                                                        /* Restore 'rupts */
 1183         
 1184         mp = &((mapping_t *)mb)[mindx];                                                         /* Point to the allocated mapping */
 1185     mp->mpFlags = lists;                                                                                /* set the list count */
 1186 
 1187 
 1188         return mp;                                                                                                      /* Send it back... */
 1189 }
 1190 
 1191 
 1192 void
 1193 consider_mapping_adjust(void)
 1194 {
 1195         spl_t                   s;
 1196 
 1197         s = splhigh();                                                                                          /* Don't bother from now on */
 1198         if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {     /* Lock the control header */ 
 1199                 panic("consider_mapping_adjust -- lock timeout\n");
 1200         }
 1201 
 1202         if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) {
 1203                 if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {   /* Make sure we aren't recursing */
 1204                         thread_call_enter(mapping_adjust_call);                 /* Go allocate some more */
 1205                 }
 1206         }
 1207 
 1208         hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);                            /* Unlock our stuff */
 1209         splx(s);                                                                                                        /* Restore 'rupts */
 1210         
 1211 }
 1212 
 1213 
 1214 
 1215 /*
  1216  *              void mapping_free_init(mb, perm, locked) - Adds a block of storage to the free mapping list
 1217  *
 1218  *              The mapping block is a page size area on a page boundary.  It contains 1 header and 63
 1219  *              mappings.  This call adds and initializes a block for use.  Mappings come in two sizes,
 1220  *              64 and 128 bytes (the only difference is the number of skip-lists.)  When we allocate a
  1221  *              128-byte mapping we just look for two consecutive free 64-byte mappings, so most of the
 1222  *              code only deals with "basic" 64-byte mappings.  This works for two reasons:
 1223  *                      - Only one in 256 mappings is big, so they are rare.
  1224  *                      - If we cannot find two consecutive free mappings, we just return a small one.
  1225  *                        There is no problem with doing this, except a minor performance degradation.
 1226  *              Therefore, all counts etc in the mapping control structure are in units of small blocks.
 1227  *      
 1228  *              The header contains a chain link, bit maps, a virtual to real translation mask, and
 1229  *              some statistics. Bit maps map each slot on the page (bit 0 is not used because it 
 1230  *              corresponds to the header).  The translation mask is the XOR of the virtual and real
 1231  *              addresses (needless to say, the block must be wired).
 1232  *
 1233  *              We handle these mappings the same way as saveareas: the block is only on the chain so
 1234  *              long as there are free entries in it.
 1235  *
 1236  *              Empty blocks are garbage collected when there are at least mapCtl.mapcmin pages worth of free 
 1237  *              mappings. Blocks marked PERM won't ever be released.
 1238  *
 1239  *              If perm is negative, the mapping is initialized, but immediately queued to the mapCtl.mapcrel
 1240  *              list.  We do this only at start up time. This is done because we only allocate blocks 
 1241  *              in the pageout scan and it doesn't start up until after we run out of the initial mappings.
 1242  *              Therefore, we need to preallocate a bunch, but we don't want them to be permanent.  If we put
 1243  *              them on the release queue, the allocate routine will rescue them.  Then when the
 1244  *              pageout scan starts, all extra ones will be released.
 1245  *
 1246  */
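
/*
 *              Illustrative sketch (not part of the original source): mapblokvrswap is the XOR of
 *              the block's (page-aligned) virtual and real addresses, so either address of anything
 *              inside the wired block can be recovered from the other with a single XOR.  The helper
 *              names are hypothetical.
 */
#if 0
static addr64_t mapblok_v_to_r(mappingblok_t *mb, addr64_t va) {        /* Virtual to real within one block */
        return va ^ mb->mapblokvrswap;                                  /* Page bits flip V to R; offset bits pass through */
}

static addr64_t mapblok_r_to_v(mappingblok_t *mb, addr64_t ra) {        /* Real to virtual within one block */
        return ra ^ mb->mapblokvrswap;                                  /* The same mask works in both directions */
}
#endif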
 1247 
 1248 
 1249 void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) {           
  1250                                                                                                                         /* Sets up a page-sized block of mappings.
  1251                                                                                                                            perm indicates if the block can be released
  1252                                                                                                                            or goes straight to the release queue.
  1253                                                                                                                            locked indicates if the lock is held already */
 1254                                                                                                                    
 1255         mappingblok_t   *mb;
 1256         spl_t           s;
 1257         addr64_t        raddr;
 1258         ppnum_t         pp;
 1259 
 1260         mb = (mappingblok_t *)mbl;                                                              /* Start of area */     
 1261         
 1262         if(perm >= 0) {                                                                                 /* See if we need to initialize the block */
 1263                 if(perm) {
 1264                         raddr = (addr64_t)((unsigned int)mbl);                  /* Perm means V=R */
 1265                         mb->mapblokflags = mbPerm;                                              /* Set perm */
 1266 //                      mb->mapblokflags |= (unsigned int)mb;                   /* (BRINGUP) */
 1267                 }
 1268                 else {
 1269                         pp = pmap_find_phys(kernel_pmap, (addr64_t)mbl);        /* Get the physical page */
 1270                         if(!pp) {                                                                               /* What gives?  Where's the page? */
 1271                                 panic("mapping_free_init: could not find translation for vaddr %016llX\n", (addr64_t)mbl);
 1272                         }
 1273                         
 1274                         raddr = (addr64_t)pp << 12;                                             /* Convert physical page to physical address */
 1275                         mb->mapblokflags = 0;                                                   /* Set not perm */
 1276 //                      mb->mapblokflags |= (unsigned int)mb;                   /* (BRINGUP) */
 1277                 }
 1278                 
 1279                 mb->mapblokvrswap = raddr ^ (addr64_t)((unsigned int)mbl);              /* Form translation mask */
 1280                 
 1281                 mb->mapblokfree[0] = 0x7FFFFFFF;                                        /* Set first 32 (minus 1) free */
 1282                 mb->mapblokfree[1] = 0xFFFFFFFF;                                        /* Set next 32 free */
 1283         }
 1284         
 1285         s = splhigh();                                                                                  /* Don't bother from now on */
 1286         if(!locked) {                                                                                   /* Do we need the lock? */
 1287                 if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {             /* Lock the control header */ 
 1288                         panic("mapping_free_init: timeout getting control lock\n");     /* Tell all and die */
 1289                 }
 1290         }
 1291         
 1292         if(perm < 0) {                                                                                  /* Direct to release queue? */
 1293                 mb->nextblok = mapCtl.mapcrel;                                          /* Move forward pointer */
 1294                 mapCtl.mapcrel = mb;                                                            /* Queue us on in */
 1295                 mapCtl.mapcreln++;                                                                      /* Count the free block */
 1296         }
 1297         else {                                                                                                  /* Add to the free list */
 1298                 
 1299                 mb->nextblok = 0;                                                                       /* We always add to the end */
 1300                 mapCtl.mapcfree += MAPPERBLOK;                                          /* Bump count */
 1301                 
 1302                 if(!((unsigned int)mapCtl.mapcnext)) {                          /* First entry on list? */
 1303                         mapCtl.mapcnext = mapCtl.mapclast = mb;                 /* Chain to us */
 1304                 }
 1305                 else {                                                                                          /* We are not the first */
 1306                         mapCtl.mapclast->nextblok = mb;                                 /* Point the last to us */
 1307                         mapCtl.mapclast = mb;                                                   /* We are now last */
 1308                 }
 1309         }
 1310                 
 1311         if(!locked) {                                                                                   /* Do we need to unlock? */
 1312                 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);            /* Unlock our stuff */
 1313         }
 1314 
 1315         splx(s);                                                                                                /* Restore 'rupts */
 1316         return;                                                                                                 /* All done, leave... */
 1317 }
 1318 
 1319 
 1320 /*
  1321  *              void mapping_prealloc(unsigned int) - Preallocates mappings for a large request
 1322  *      
 1323  *              No locks can be held, because we allocate memory here.
 1324  *              This routine needs a corresponding mapping_relpre call to remove the
 1325  *              hold off flag so that the adjust routine will free the extra mapping
 1326  *              blocks on the release list.  I don't like this, but I don't know
 1327  *              how else to do this for now...
 1328  *
 1329  */
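
/*
 *              Illustrative usage sketch (not part of the original source): a caller about to map a
 *              large region brackets the work with a prealloc/relpre pair so the adjust routine holds
 *              on to the staged blocks for the duration.  region_size and the elided middle are
 *              hypothetical.
 */
#if 0
        mapping_prealloc(region_size);                                  /* Stage enough mapping blocks and raise the hold-off */
        /* ... enter the mappings for the large region here ... */
        mapping_relpre();                                               /* Drop the hold-off so extra blocks can be released */
#endif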
 1330 
  1331 void mapping_prealloc(unsigned int size) {                                      /* Preallocates mappings for a large request */
 1332 
 1333         int     nmapb, i;
 1334         kern_return_t   retr;
 1335         mappingblok_t   *mbn;
 1336         spl_t           s;
 1337 
 1338         s = splhigh();                                                                                  /* Don't bother from now on */
 1339         if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {             /* Lock the control header */ 
 1340                 panic("mapping_prealloc - timeout getting control lock\n");     /* Tell all and die */
 1341         }
 1342 
 1343         nmapb = (size >> 12) + mapCtl.mapcmin;                                  /* Get number of entries needed for this and the minimum */
 1344         
 1345         mapCtl.mapcholdoff++;                                                                   /* Bump the hold off count */
 1346         
 1347         if((nmapb = (nmapb - mapCtl.mapcfree)) <= 0) {                  /* Do we already have enough? */
 1348                 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);            /* Unlock our stuff */
 1349                 splx(s);                                                                                        /* Restore 'rupts */
 1350                 return;
 1351         }
 1352         if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) {     /* Make sure we aren't recursing */
 1353                 hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);                    /* Unlock our stuff */
 1354                 splx(s);                                                                                        /* Restore 'rupts */
 1355                 return;
 1356         }
 1357         nmapb = (nmapb + MAPPERBLOK - 1) / MAPPERBLOK;                  /* Get number of blocks to get */
 1358         
 1359         hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);                    /* Unlock our stuff */
 1360         splx(s);                                                                                                /* Restore 'rupts */
 1361         
 1362         for(i = 0; i < nmapb; i++) {                                                    /* Allocate 'em all */
 1363                 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE);   /* Find a virtual address to use */
 1364                 if(retr != KERN_SUCCESS)                                                        /* Did we get some memory? */
 1365                         break;
  1366                 mapping_free_init((vm_offset_t)mbn, -1, 0);                     /* Initialize onto the release queue */
 1367         }
 1368         if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
 1369                 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
 1370 
 1371         mapCtl.mapcrecurse = 0;                                                                         /* We are done now */
 1372 }
 1373 
 1374 /*
 1375  *              void mapping_relpre(void) - Releases preallocation release hold off
 1376  *      
 1377  *              This routine removes the
 1378  *              hold off flag so that the adjust routine will free the extra mapping
 1379  *              blocks on the release list.  I don't like this, but I don't know
 1380  *              how else to do this for now...
 1381  *
 1382  */
 1383 
 1384 void mapping_relpre(void) {                                                                     /* Releases release hold off */
 1385 
 1386         spl_t           s;
 1387 
 1388         s = splhigh();                                                                                  /* Don't bother from now on */
 1389         if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) {             /* Lock the control header */ 
 1390                 panic("mapping_relpre - timeout getting control lock\n");       /* Tell all and die */
 1391         }
 1392         if(--mapCtl.mapcholdoff < 0) {                                                  /* Back down the hold off count */
 1393                 panic("mapping_relpre: hold-off count went negative\n");
 1394         }
 1395 
 1396         hw_lock_unlock((hw_lock_t)&mapCtl.mapclock);                    /* Unlock our stuff */
 1397         splx(s);                                                                                                /* Restore 'rupts */
 1398 }
 1399 
 1400 /*
 1401  *              void mapping_free_prime(void) - Primes the mapping block release list
 1402  *
 1403  *              See mapping_free_init.
 1404  *              No locks can be held, because we allocate memory here.
 1405  *              One processor running only.
 1406  *
 1407  */
 1408 
 1409 void mapping_free_prime(void) {                                                                 /* Primes the mapping block release list */
 1410 
 1411         int     nmapb, i;
 1412         kern_return_t   retr;
 1413         mappingblok_t   *mbn;
 1414         vm_offset_t     mapping_min;
 1415         
 1416         retr = kmem_suballoc(kernel_map, &mapping_min, sane_size / 16,
 1417                              FALSE, VM_FLAGS_ANYWHERE, &mapping_map);
 1418 
 1419         if (retr != KERN_SUCCESS)
 1420                 panic("mapping_free_prime: kmem_suballoc failed");
 1421 
 1422 
 1423         nmapb = (mapCtl.mapcfree + mapCtl.mapcinuse + MAPPERBLOK - 1) / MAPPERBLOK;     /* Get permanent allocation */
 1424         nmapb = nmapb * 4;                                                                                      /* Get 4 times our initial allocation */
 1425 
 1426 #if DEBUG
 1427         kprintf("mapping_free_prime: free = %08X; in use = %08X; priming = %08X\n", 
 1428           mapCtl.mapcfree, mapCtl.mapcinuse, nmapb);
 1429 #endif
 1430         
 1431         for(i = 0; i < nmapb; i++) {                                                            /* Allocate 'em all */
 1432                 retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE);   /* Find a virtual address to use */
 1433                 if(retr != KERN_SUCCESS) {                                                              /* Did we get some memory? */
 1434                         panic("Whoops...  Not a bit of wired memory left for anyone\n");
 1435                 }
 1436                 mapping_free_init((vm_offset_t)mbn, -1, 0);                             /* Initialize onto release queue */
 1437         }
 1438         if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc)
 1439                 mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1));
 1440 }
 1441 
 1442 
 1443 void
 1444 mapping_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
 1445                        vm_size_t *alloc_size, int *collectable, int *exhaustable)
 1446 {
 1447         *count      = mapCtl.mapcinuse;
 1448         *cur_size   = ((PAGE_SIZE / (MAPPERBLOK + 1)) * (mapCtl.mapcinuse + mapCtl.mapcfree)) + (PAGE_SIZE * mapCtl.mapcreln);
 1449         *max_size   = (PAGE_SIZE / (MAPPERBLOK + 1)) * mapCtl.mapcmaxalloc;
 1450         *elem_size  = (PAGE_SIZE / (MAPPERBLOK + 1));
 1451         *alloc_size = PAGE_SIZE;
 1452 
 1453         *collectable = 1;
 1454         *exhaustable = 0;
 1455 }
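
/*
 *              Illustrative note (not part of the original source): with 4K pages and MAPPERBLOK = 63
 *              usable slots per block (one 64-byte slot per page is the header), the element size
 *              reported above works out to PAGE_SIZE / (MAPPERBLOK + 1) = 4096 / 64 = 64 bytes, i.e.
 *              one basic mapping, and cur_size/max_size simply scale that by the slot counts.
 */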
 1456 
 1457 
 1458 /*
 1459  *              addr64_t        mapping_p2v(pmap_t pmap, ppnum_t pa) - Finds first virtual mapping of a physical page in a space
 1460  *
  1461  *              First looks up the physical entry associated with the physical page.  Then searches the alias
 1462  *              list for a matching pmap.  It grabs the virtual address from the mapping, drops busy, and returns 
 1463  *              that.
 1464  *
 1465  */
 1466 
 1467 addr64_t        mapping_p2v(pmap_t pmap, ppnum_t pa) {                          /* Finds first virtual mapping of a physical page in a space */
 1468 
 1469         spl_t s;
 1470         mapping_t *mp;
 1471         unsigned int pindex;
 1472         phys_entry_t *physent;
 1473         addr64_t va;
 1474 
 1475         physent = mapping_phys_lookup(pa, &pindex);                                     /* Get physical entry */
 1476         if(!physent) {                                                                                          /* Did we find the physical page? */
 1477                 panic("mapping_p2v: invalid physical page %08X\n", pa);
 1478         }
 1479 
 1480         s = splhigh();                                                                                  /* Make sure interruptions are disabled */
 1481 
 1482         mp = hw_find_space(physent, pmap->space);                               /* Go find the first mapping to the page from the requested pmap */
 1483 
 1484         if(mp) {                                                                                                /* Did we find one? */
 1485                 va = mp->mpVAddr & -4096;                                                       /* If so, get the cleaned up vaddr */
  1486                 mapping_drop_busy(mp);                                                          /* Go ahead and release the mapping now */
 1487         }
 1488         else va = 0;                                                                                    /* Return failure */
 1489 
 1490         splx(s);                                                                                                /* Restore 'rupts */
 1491         
 1492         return va;                                                                                              /* Bye, bye... */
 1493         
 1494 }
 1495 
 1496 /*
 1497  *      phystokv(addr)
 1498  *
 1499  *      Convert a physical address to a kernel virtual address if
  1500  *      there is a mapping, otherwise return 0
 1501  */
 1502 
 1503 vm_offset_t phystokv(vm_offset_t pa) {
 1504 
 1505         addr64_t        va;
 1506         ppnum_t pp;
 1507 
 1508         pp = pa >> 12;                                                                                  /* Convert to a page number */
 1509         
 1510         if(!(va = mapping_p2v(kernel_pmap, pp))) {
 1511                 return 0;                                                                                       /* Can't find it, return 0... */
 1512         }
 1513         
 1514         return (va | (pa & (PAGE_SIZE - 1)));                                   /* Build and return VADDR... */
 1515 
 1516 }
 1517 
 1518 /*
 1519  *      kvtophys(addr)
 1520  *
 1521  *      Convert a kernel virtual address to a physical address
 1522  */
 1523 vm_offset_t kvtophys(vm_offset_t va) {
 1524 
 1525         return pmap_extract(kernel_pmap, va);                                   /* Find mapping and lock the physical entry for this mapping */
 1526 
 1527 }
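
/*
 *              Illustrative usage sketch (not part of the original source): for wired kernel memory the
 *              two helpers above are inverses, so a physical address with a kernel mapping can be
 *              round-tripped.  The variable names are hypothetical.
 */
#if 0
        vm_offset_t kva = phystokv(pa);                                 /* Returns 0 if the page has no kernel mapping */
        if (kva != 0) {
                vm_offset_t pa_again = kvtophys(kva);                   /* Recovers the original physical address */
        }
#endif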
 1528 
 1529 /*
 1530  *              void ignore_zero_fault(boolean_t) - Sets up to ignore or honor any fault on 
 1531  *              page 0 access for the current thread.
 1532  *
 1533  *              If parameter is TRUE, faults are ignored
 1534  *              If parameter is FALSE, faults are honored
 1535  *
 1536  */
 1537 
 1538 void ignore_zero_fault(boolean_t type) {                                /* Sets up to ignore or honor any fault on page 0 access for the current thread */
 1539 
 1540         if(type) current_thread()->machine.specFlags |= ignoreZeroFault;        /* Ignore faults on page 0 */
 1541         else     current_thread()->machine.specFlags &= ~ignoreZeroFault;       /* Honor faults on page 0 */
 1542         
  1543         return;                                                                                         /* Return... */
 1544 }
 1545 
 1546 
 1547 /* 
  1548  *              Copies data between a physical page and a virtual page, or two physical pages.  This is used to
 1549  *              move data from the kernel to user state. Note that the "which" parm
 1550  *              says which of the parameters is physical and if we need to flush sink/source.  
 1551  *              Note that both addresses may be physical, but only one may be virtual.
 1552  *
 1553  *              The rules are that the size can be anything.  Either address can be on any boundary
 1554  *              and span pages.  The physical data must be contiguous as must the virtual.
 1555  *
 1556  *              We can block when we try to resolve the virtual address at each page boundary.
 1557  *              We don't check protection on the physical page.
 1558  *
 1559  *              Note that we will not check the entire range and if a page translation fails,
 1560  *              we will stop with partial contents copied.
 1561  *
 1562  */
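 
/*
 *              Illustrative usage sketch (not part of the original source): to copy from a physical
 *              source page into a kernel-virtual buffer, the caller marks the source as physical and
 *              selects the kernel map for the virtual side.  src_phys, dst_kva, and len are
 *              hypothetical.
 */
#if 0
        kern_return_t kr;
        kr = hw_copypv_32(src_phys, (addr64_t)dst_kva, len, cppvPsrc | cppvKmap);       /* Sink is kernel virtual */
        if (kr != KERN_SUCCESS) {
                /* The virtual side could not be faulted in; the copy may be partial */
        }
#endif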
 1563  
 1564 kern_return_t hw_copypv_32(addr64_t source, addr64_t sink, unsigned int size, int which) {
 1565  
 1566         vm_map_t map;
 1567         kern_return_t ret;
 1568         addr64_t nextva, vaddr, paddr;
 1569         register mapping_t *mp;
 1570         spl_t s;
 1571         unsigned int lop, csize;
 1572         int needtran, bothphys;
 1573         unsigned int pindex;
 1574         phys_entry_t *physent;
 1575         vm_prot_t prot;
 1576         int orig_which;
 1577 
 1578         orig_which = which;
 1579 
 1580         map = (which & cppvKmap) ? kernel_map : current_map_fast();
 1581 
 1582         if((which & (cppvPsrc | cppvPsnk)) == 0 ) {             /* Make sure that only one is virtual */
 1583                 panic("copypv: no more than 1 parameter may be virtual\n");     /* Not allowed */
 1584         }
 1585         
 1586         bothphys = 1;                                                                   /* Assume both are physical */
 1587         
 1588         if(!(which & cppvPsnk)) {                                               /* Is sink page virtual? */
 1589                 vaddr = sink;                                                           /* Sink side is virtual */
 1590                 bothphys = 0;                                                           /* Show both aren't physical */
 1591                 prot = VM_PROT_READ | VM_PROT_WRITE;            /* Sink always must be read/write */
 1592         } else if (!(which & cppvPsrc)) {                               /* Is source page virtual? */
 1593                 vaddr = source;                                                         /* Source side is virtual */
 1594                 bothphys = 0;                                                           /* Show both aren't physical */
 1595                 prot = VM_PROT_READ;                                            /* Virtual source is always read only */
 1596         }
 1597 
 1598         needtran = 1;                                                                   /* Show we need to map the virtual the first time */
 1599         s = splhigh();                                                                  /* Don't bother me */
 1600 
 1601         while(size) {
 1602 
 1603                 if(!bothphys && (needtran || !(vaddr & 4095LL))) {      /* If first time or we stepped onto a new page, we need to translate */
 1604                         if(!needtran) {                                                 /* If this is not the first translation, we need to drop the old busy */
 1605                                 mapping_drop_busy(mp);                          /* Release the old mapping now */
 1606                         }
 1607                         needtran = 0;
 1608                         
 1609                         while(1) {
 1610                                 mp = mapping_find(map->pmap, vaddr, &nextva, 1);        /* Find and busy the mapping */
 1611                                 if(!mp) {                                                       /* Was it there? */
 1612                                         if(getPerProc()->istackptr == 0)
  1613                                                 panic("copypv: No valid mapping on memory %s %x", "RD", vaddr);
 1614 
 1615                                         splx(s);                                                /* Restore the interrupt level */
 1616                                         ret = vm_fault(map, vm_map_trunc_page(vaddr), prot, FALSE, THREAD_UNINT, NULL, 0);      /* Didn't find it, try to fault it in... */
 1617                                 
 1618                                         if(ret != KERN_SUCCESS)return KERN_FAILURE;     /* Didn't find any, return no good... */
 1619                                         
 1620                                         s = splhigh();                                  /* Don't bother me */
 1621                                         continue;                                               /* Go try for the map again... */
 1622         
 1623                                 }
 1624                                 if (mp->mpVAddr & mpI) {                 /* cache inhibited, so force the appropriate page to be flushed before */
 1625                                         if (which & cppvPsrc)            /* and after the copy to avoid cache paradoxes */
 1626                                                 which |= cppvFsnk;
 1627                                         else
 1628                                                 which |= cppvFsrc;
 1629                                 } else
 1630                                         which = orig_which;
 1631 
 1632                                 /* Note that we have to have the destination writable.  So, if we already have it, or we are mapping the source,
 1633                                         we can just leave.
 1634                                 */              
  1635                                 if((which & cppvPsnk) || !(mp->mpVAddr & 1)) break;             /* We got it mapped R/W or the virtual page is the source, leave... */
 1636                         
 1637                                 mapping_drop_busy(mp);                          /* Go ahead and release the mapping for now */
 1638                                 if(getPerProc()->istackptr == 0)
  1639                                         panic("copypv: No valid mapping on memory %s %x", "RDWR", vaddr);
 1640                                 splx(s);                                                        /* Restore the interrupt level */
 1641                                 
 1642                                 ret = vm_fault(map, vm_map_trunc_page(vaddr), VM_PROT_READ | VM_PROT_WRITE, FALSE, THREAD_UNINT, NULL, 0);      /* check for a COW area */
 1643                                 if (ret != KERN_SUCCESS) return KERN_FAILURE;   /* We couldn't get it R/W, leave in disgrace... */
 1644                                 s = splhigh();                                          /* Don't bother me */
 1645                         }
 1646                         paddr = ((addr64_t)mp->mpPAddr << 12) + (vaddr - (mp->mpVAddr & -4096LL));        /* construct the physical address... this calculation works */
 1647                                                                                                           /* properly on both single page and block mappings */
 1648                         if(which & cppvPsrc) sink = paddr;              /* If source is physical, then the sink is virtual */
  1649                         else source = paddr;                                    /* Otherwise the source is virtual */
 1650                 }
 1651                         
 1652                 lop = (unsigned int)(4096LL - (sink & 4095LL));         /* Assume sink smallest */
 1653                 if(lop > (unsigned int)(4096LL - (source & 4095LL))) lop = (unsigned int)(4096LL - (source & 4095LL));  /* No, source is smaller */
 1654                 
 1655                 csize = size;                                                           /* Assume we can copy it all */
 1656                 if(lop < size) csize = lop;                                     /* Nope, we can't do it all */
 1657                 
 1658                 if(which & cppvFsrc) flush_dcache64(source, csize, 1);  /* If requested, flush source before move */
 1659                 if(which & cppvFsnk) flush_dcache64(sink, csize, 1);    /* If requested, flush sink before move */
 1660 
 1661                 bcopy_physvir_32(source, sink, csize);                  /* Do a physical copy, virtually */
 1662                 
 1663                 if(which & cppvFsrc) flush_dcache64(source, csize, 1);  /* If requested, flush source after move */
 1664                 if(which & cppvFsnk) flush_dcache64(sink, csize, 1);    /* If requested, flush sink after move */
 1665 
 1666 /*
 1667  *              Note that for certain ram disk flavors, we may be copying outside of known memory.
 1668  *              Therefore, before we try to mark it modified, we check if it exists.
 1669  */
 1670 
 1671                 if( !(which & cppvNoModSnk)) {
 1672                         physent = mapping_phys_lookup(sink >> 12, &pindex);     /* Get physical entry for sink */
 1673                         if(physent) mapping_set_mod((ppnum_t)(sink >> 12));             /* Make sure we know that it is modified */
 1674                 }
 1675                 if( !(which & cppvNoRefSrc)) {
 1676                         physent = mapping_phys_lookup(source >> 12, &pindex);   /* Get physical entry for source */
 1677                         if(physent) mapping_set_ref((ppnum_t)(source >> 12));           /* Make sure we know that it is referenced */
 1678                 }
 1679                 size = size - csize;                                            /* Calculate what is left */
 1680                 vaddr = vaddr + csize;                                          /* Move to next sink address */
 1681                 source = source + csize;                                        /* Bump source to next physical address */
 1682                 sink = sink + csize;                                            /* Bump sink to next physical address */
 1683         }
 1684         
 1685         if(!bothphys) mapping_drop_busy(mp);                    /* Go ahead and release the mapping of the virtual page if any */
 1686         splx(s);                                                                                /* Open up for interrupts */
 1687 
 1688         return KERN_SUCCESS;
 1689 }
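
/*
 *      Editor's sketch (illustrative only, not part of the original file): one plausible way a caller
 *      might use copypv() above to copy a caller-virtual buffer into a physical page.  The helper name,
 *      the parameter values, and the assumption that len fits within one 4KB page are all made up for
 *      the example; only copypv() and the cppv* flags come from this file and its headers.
 */
#if 0   /* example only */
static kern_return_t
example_copy_to_phys_page(addr64_t src_va, ppnum_t sink_page, unsigned int len)
{
        addr64_t sink_pa = ((addr64_t)sink_page) << 12;         /* physical byte address of the sink page */

        /* Source is virtual in the current map, sink is physical; copypv() does the per-page
           chunking, cache flushing, and modify/reference bookkeeping shown above.  len is assumed
           to be at most 4096 so the copy stays within sink_page. */
        return copypv(src_va, sink_pa, len, cppvPsnk);
}
#endif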
 1690 
 1691 
 1692 /*
 1693  *      Debug code 
 1694  */
 1695 
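/*
 *      Editor's note (descriptive, not in the original source): mapping_verify() is a consistency
 *      check over the mapping block allocator.  It walks the free chain verifying that each block's
 *      flags tag points back at the block itself, checks that mapclast really is the last block on
 *      that chain, and recounts the release queue against mapcreln, panicking on any mismatch.
 */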
 1696 void mapping_verify(void) {
 1697 
 1698         spl_t           s;
 1699         mappingblok_t   *mb, *mbn;
 1700         unsigned int    relncnt;
 1701         unsigned int    dumbodude;
 1702 
 1703         dumbodude = 0;
 1704         
 1705         s = splhigh();                                                                                  /* Don't bother from now on */
 1706 
 1707         mbn = 0;                                                                                                /* Start with none */
 1708         for(mb = mapCtl.mapcnext; mb; mb = mb->nextblok) {              /* Walk the free chain */
 1709                 if((mappingblok_t *)(mb->mapblokflags & 0x7FFFFFFF) != mb) {    /* Is tag ok? */
 1710                         panic("mapping_verify: flags tag bad, free chain; mb = %08X, tag = %08X\n", mb, mb->mapblokflags);
 1711                 }
 1712                 mbn = mb;                                                                                       /* Remember the last one */
 1713         }
 1714         
 1715         if(mapCtl.mapcnext && (mapCtl.mapclast != mbn)) {               /* Do we point to the last one? */
 1716                 panic("mapping_verify: last pointer bad; mb = %08X, mapclast = %08X\n", mb, mapCtl.mapclast);
 1717         }
 1718         
 1719         relncnt = 0;                                                                                    /* Clear count */
 1720         for(mb = mapCtl.mapcrel; mb; mb = mb->nextblok) {               /* Walk the release chain */
 1721                 dumbodude |= mb->mapblokflags;                                          /* Just touch it to make sure it is mapped */
 1722                 relncnt++;                                                                                      /* Count this one */
 1723         }
 1724         
 1725         if(mapCtl.mapcreln != relncnt) {                                                        /* Is the count on release queue ok? */
 1726                 panic("mapping_verify: bad release queue count; mapcreln = %d, cnt = %d, ignore this = %08X\n", mapCtl.mapcreln, relncnt, dumbodude);
 1727         }
 1728 
 1729         splx(s);                                                                                                /* Restore 'rupts */
 1730 
 1731         return;
 1732 }
 1733 
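/*
 *      Editor's note (descriptive, not in the original source): mapping_phys_unused() asserts that a
 *      physical page has no mappings left.  It looks up the page's phys_entry; if the page is not
 *      managed it just returns, and if any mappings are still chained to the entry (ppLink bits other
 *      than the lock and flag bits) it panics.
 */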
 1734 void mapping_phys_unused(ppnum_t pa) {
 1735 
 1736         unsigned int pindex;
 1737         phys_entry_t *physent;
 1738 
 1739         physent = mapping_phys_lookup(pa, &pindex);                             /* Get physical entry */
 1740         if(!physent) return;                                                                    /* Did we find the physical page? */
 1741 
 1742         if(!(physent->ppLink & ~(ppLock | ppFlags))) return;    /* No mappings are chained to this page, so it really is unused */
 1743         
 1744         panic("mapping_phys_unused: physical page (%08X) in use, physent = %08X\n", pa, physent);
 1745         
 1746 }
 1747         
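/*
 *      Editor's note (descriptive, not in the original source): mapping_hibernate_flush() visits the
 *      phys_entry of every page in every physical memory bank and calls hw_walk_phys() with the
 *      hwpPurgePTE modifier and hwpNoop for the per-entry operations, which appears intended to purge
 *      any remaining hardware PTEs for those pages without otherwise changing the mappings.
 */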
 1748 void mapping_hibernate_flush(void)
 1749 {
 1750     int bank;
 1751     unsigned int page;
 1752     struct phys_entry * entry;
 1753 
 1754     for (bank = 0; bank < pmap_mem_regions_count; bank++)
 1755     {
 1756         entry = (struct phys_entry *) pmap_mem_regions[bank].mrPhysTab;
 1757         for (page = pmap_mem_regions[bank].mrStart; page <= pmap_mem_regions[bank].mrEnd; page++)
 1758         {
 1759             hw_walk_phys(entry, hwpNoop, hwpNoop, hwpNoop, 0, hwpPurgePTE);
 1760             entry++;
 1761         }
 1762     }
 1763 }
 1764 
 1765 
 1766 
 1767 
 1768 
 1769 
