The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/osfmk/ppc/vmachmon.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*
    2  * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
    3  *
    4  * @APPLE_LICENSE_HEADER_START@
    5  * 
    6  * The contents of this file constitute Original Code as defined in and
    7  * are subject to the Apple Public Source License Version 1.1 (the
    8  * "License").  You may not use this file except in compliance with the
    9  * License.  Please obtain a copy of the License at
   10  * http://www.apple.com/publicsource and read it before using this file.
   11  * 
   12  * This Original Code and all software distributed under the License are
   13  * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   14  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   15  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   16  * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
   17  * License for the specific language governing rights and limitations
   18  * under the License.
   19  * 
   20  * @APPLE_LICENSE_HEADER_END@
   21  */
   22 /*-----------------------------------------------------------------------
   23 ** vmachmon.c
   24 **
   25 ** C routines that we are adding to the MacOS X kernel.
   26 **
   27 -----------------------------------------------------------------------*/
   28 
   29 #include <mach/mach_types.h>
   30 #include <mach/kern_return.h>
   31 #include <mach/host_info.h>
   32 #include <kern/kern_types.h>
   33 #include <kern/kalloc.h>
   34 #include <kern/host.h>
   35 #include <kern/task.h>
   36 #include <kern/thread.h>
   37 #include <ppc/exception.h>
   38 #include <ppc/mappings.h>
   39 #include <ppc/thread.h>
   40 #include <vm/vm_kern.h>
   41 #include <vm/vm_fault.h>
   42 
   43 #include <ppc/vmachmon.h>
   44 #include <ppc/lowglobals.h>
   45 
   46 extern double FloatInit;
   47 extern unsigned long QNaNbarbarian[4];
   48 
   49 /*************************************************************************************
   50         Virtual Machine Monitor Internal Routines
   51 **************************************************************************************/
   52 
   53 /*-----------------------------------------------------------------------
   54 ** vmm_get_entry
   55 **
** This function verifies and returns a vmm context entry index
   57 **
   58 ** Inputs:
   59 **              act - pointer to current thread activation
   60 **              index - index into vmm control table (this is a "one based" value)
   61 **
   62 ** Outputs:
   63 **              address of a vmmCntrlEntry or 0 if not found
   64 -----------------------------------------------------------------------*/
   65 
   66 static vmmCntrlEntry *vmm_get_entry(
   67         thread_t                        act,
   68         vmm_thread_index_t      index)
   69 {
   70         vmmCntrlTable *CTable;
   71         vmmCntrlEntry *CEntry;
   72 
   73         index = index & vmmTInum;                                                               /* Clean up the index */
   74 
   75         if (act->machine.vmmControl == 0) return NULL;                  /* No control table means no vmm */
   76         if ((index - 1) >= kVmmMaxContexts) return NULL;                /* Index not in range */        
   77 
   78         CTable = act->machine.vmmControl;                                               /* Make the address a bit more convienient */
   79         CEntry = &CTable->vmmc[index - 1];                                              /* Point to the entry */
   80         
   81         if (!(CEntry->vmmFlags & vmmInUse)) return NULL;                /* See if the slot is actually in use */
   82         
   83         return CEntry;
   84 }
   85 
   86 /*-----------------------------------------------------------------------
   87 ** vmm_get_adsp
   88 **
   89 ** This function verifies and returns the pmap for an address space.
   90 ** If there is none and the request is valid, a pmap will be created.
   91 **
   92 ** Inputs:
   93 **              act - pointer to current thread activation
   94 **              index - index into vmm control table (this is a "one based" value)
   95 **
   96 ** Outputs:
**              address of a pmap or 0 if not found or could not be created
   98 **              Note that if there is no pmap for the address space it will be created.
   99 -----------------------------------------------------------------------*/
  100 
  101 static pmap_t vmm_get_adsp(thread_t act, vmm_thread_index_t index)
  102 {
  103         pmap_t pmap;
  104 
  105         if (act->machine.vmmControl == 0) return NULL;                  /* No control table means no vmm */
  106         if ((index - 1) >= kVmmMaxContexts) return NULL;                /* Index not in range */        
  107 
  108         pmap = act->machine.vmmControl->vmmAdsp[index - 1];             /* Get the pmap */
  109         return (pmap);                                                                                  /*  and return it. */
  110 }
  111 
  112 /*-----------------------------------------------------------------------
  113 ** vmm_build_shadow_hash
  114 **
  115 ** Allocate and initialize a shadow hash table.
  116 **
  117 ** This function assumes that PAGE_SIZE is 4k-bytes.
  118 **
  119 -----------------------------------------------------------------------*/
static pmap_vmm_ext *vmm_build_shadow_hash(pmap_t pmap)
{
	pmap_vmm_ext   *ext;									/* VMM pmap extension we're building */
	ppnum_t			extPP;									/* VMM pmap extension physical page number */
	kern_return_t	ret;									/* Return code from various calls */
	uint32_t		pages = GV_HPAGES;						/* Number of pages in the hash table */
	vm_offset_t		free = VMX_HPIDX_OFFSET;				/* Offset into extension page of free area (128-byte aligned) */
	uint32_t		freeSize  = PAGE_SIZE - free;			/* Number of free bytes in the extension page */

	/* Sanity-check the layout constants: the physical-page index (addr64_t per
	   page) and the page list (vm_offset_t per page) must both fit in the
	   extension page's free area. */
	if ((pages * sizeof(addr64_t)) + (pages * sizeof(vm_offset_t)) > freeSize) {
		panic("vmm_build_shadow_hash: too little pmap_vmm_ext free space\n");
	}
	
	ret = kmem_alloc_wired(kernel_map, (vm_offset_t *)&ext, PAGE_SIZE);
															/* Allocate a page-sized extension block */
	if (ret != KERN_SUCCESS) return (NULL);					/* Return NULL for failed allocate */
	bzero((char *)ext, PAGE_SIZE);							/* Zero the entire extension block page */
	
	extPP = pmap_find_phys(kernel_pmap, (vm_offset_t)ext);
															/* Get extension block's physical page number */
	if (!extPP) {											/* This should not fail, but then again... */
		/* NOTE(review): %08X with a pointer argument assumes 32-bit pointers --
		   confirm this file is never built for a 64-bit kernel address space. */
		panic("vmm_build_shadow_hash: could not translate pmap_vmm_ext vaddr %08X\n", ext);
	}
	
	ext->vmxSalt		 = (addr64_t)(vm_offset_t)ext ^ ptoa_64(extPP);
															/* Set effective<->physical conversion salt */
	ext->vmxHostPmapPhys = (addr64_t)(vm_offset_t)pmap ^ pmap->pmapvr;
															/* Set host pmap's physical address */
	ext->vmxHostPmap	 = pmap;							/* Set host pmap's effective address */
	ext->vmxHashPgIdx	 = (addr64_t *)((vm_offset_t)ext + VMX_HPIDX_OFFSET);
															/* Allocate physical index */
	ext->vmxHashPgList	 = (vm_offset_t *)((vm_offset_t)ext + VMX_HPLIST_OFFSET);
															/* Allocate page list */
	ext->vmxActiveBitmap = (vm_offset_t *)((vm_offset_t)ext + VMX_ACTMAP_OFFSET);
															/* Allocate active mapping bitmap */
	
	/* The hash table is typically larger than a single page, but we don't require it to be in a
	   contiguous virtual or physical chunk. So, we allocate it page by page, noting the effective and
	   physical address of each page in vmxHashPgList and vmxHashPgIdx, respectively. */
	uint32_t	idx;
	for (idx = 0; idx < pages; idx++) {
		ret = kmem_alloc_wired(kernel_map, &ext->vmxHashPgList[idx], PAGE_SIZE);
															/* Allocate a hash-table page */
		if (ret != KERN_SUCCESS) goto fail;					/* Allocation failed, exit through cleanup */
		bzero((char *)ext->vmxHashPgList[idx], PAGE_SIZE);	/* Zero the page */
		ext->vmxHashPgIdx[idx] = ptoa_64(pmap_find_phys(kernel_pmap, (addr64_t)ext->vmxHashPgList[idx]));
															/* Put page's physical address into index */
		if (!ext->vmxHashPgIdx[idx]) {						/* Hash-table page's LRA failed */
			panic("vmm_build_shadow_hash: could not translate hash-table vaddr %08X\n", ext->vmxHashPgList[idx]);
		}
		/* Carve the freshly-zeroed page into fixed-size mapping slots and mark
		   every slot as a free guest mapping. */
		mapping_t *map = (mapping_t *)ext->vmxHashPgList[idx];
		uint32_t mapIdx;
		for (mapIdx = 0; mapIdx < GV_SLTS_PPG; mapIdx++) {	/* Iterate over mappings in this page */
			map->mpFlags = (mpGuest | mpgFree);				/* Mark guest type and free */
			map = (mapping_t *)((char *)map + GV_SLOT_SZ);	/* Next slot-sized mapping */
		}
	}
	
	return (ext);											/* Return newly-minted VMM pmap extension */
	
fail:
	/* Unallocated entries are still zero from the bzero above, so the check
	   below only frees pages we actually obtained. */
	for (idx = 0; idx < pages; idx++) {						/* De-allocate any pages we managed to allocate */
		if (ext->vmxHashPgList[idx]) {
			kmem_free(kernel_map, ext->vmxHashPgList[idx], PAGE_SIZE);
		}
	}
	kmem_free(kernel_map, (vm_offset_t)ext, PAGE_SIZE);		/* Release the VMM pmap extension page */
	return (NULL);											/* Return NULL for failure */
}
  189 
  190 
  191 /*-----------------------------------------------------------------------
  192 ** vmm_release_shadow_hash
  193 **
  194 ** Release shadow hash table and VMM extension block
  195 **
  196 -----------------------------------------------------------------------*/
  197 static void vmm_release_shadow_hash(pmap_vmm_ext *ext)
  198 {
  199         uint32_t                idx;
  200 
  201         for (idx = 0; idx < GV_HPAGES; idx++) {                                 /* Release the hash table page by page */
  202                 kmem_free(kernel_map, ext->vmxHashPgList[idx], PAGE_SIZE);
  203         }
  204 
  205         kmem_free(kernel_map, (vm_offset_t)ext, PAGE_SIZE);             /* Release the VMM pmap extension page */
  206 }
  207 
  208 /*-----------------------------------------------------------------------
  209 ** vmm_activate_gsa
  210 **
  211 ** Activate guest shadow assist
  212 **
  213 -----------------------------------------------------------------------*/
  214 static kern_return_t vmm_activate_gsa(
  215         thread_t                        act,
  216         vmm_thread_index_t      index)
  217 {
  218         vmmCntrlTable   *CTable = act->machine.vmmControl;              /* Get VMM control table */
  219         if (!CTable) {                                                                                  /* Caller guarantees that this will work */
  220                 panic("vmm_activate_gsa: VMM control table not present; act = %08X, idx = %d\n",
  221                         act, index);
  222                 return KERN_FAILURE;
  223         }
  224         vmmCntrlEntry   *CEntry = vmm_get_entry(act, index);    /* Get context from index */
  225         if (!CEntry) {                                                                                  /* Caller guarantees that this will work */
  226                 panic("vmm_activate_gsa: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
  227                         act, index);
  228                 return KERN_FAILURE;
  229         }
  230 
  231         pmap_t  hpmap = act->map->pmap;                                                 /* Get host pmap */
  232         pmap_t  gpmap = vmm_get_adsp(act, index);                               /* Get guest pmap */
  233         if (!gpmap) {                                                                                   /* Caller guarantees that this will work */
  234                 panic("vmm_activate_gsa: Unexpected failure of vmm_get_adsp; act = %08X, idx = %d\n",
  235                         act, index);
  236                 return KERN_FAILURE;
  237         }
  238         
  239         if (!hpmap->pmapVmmExt) {                                                               /* If there's no VMM extension for this host, create one */
  240                 hpmap->pmapVmmExt = vmm_build_shadow_hash(hpmap);       /* Build VMM extension plus shadow hash and attach */
  241                 if (hpmap->pmapVmmExt) {                                                        /* See if we succeeded */
  242                         hpmap->pmapVmmExtPhys = (addr64_t)(vm_offset_t)hpmap->pmapVmmExt ^ hpmap->pmapVmmExt->vmxSalt;
  243                                                                                                                         /* Get VMM extensions block physical address */
  244                 } else {
  245                         return KERN_RESOURCE_SHORTAGE;                                  /* Not enough mojo to go */
  246                 }
  247         }
  248         gpmap->pmapVmmExt = hpmap->pmapVmmExt;                                  /* Copy VMM extension block virtual address into guest */
  249         gpmap->pmapVmmExtPhys = hpmap->pmapVmmExtPhys;                  /*  and its physical address, too */
  250         gpmap->pmapFlags |= pmapVMgsaa;                                                 /* Enable GSA for this guest */
  251         CEntry->vmmXAFlgs |= vmmGSA;                                                    /* Show GSA active here, too */
  252 
  253         return KERN_SUCCESS;
  254 }
  255 
  256 
  257 /*-----------------------------------------------------------------------
  258 ** vmm_deactivate_gsa
  259 **
  260 ** Deactivate guest shadow assist
  261 **
  262 -----------------------------------------------------------------------*/
  263 static void vmm_deactivate_gsa(
  264         thread_t                        act,
  265         vmm_thread_index_t      index)
  266 {
  267         vmmCntrlEntry   *CEntry = vmm_get_entry(act, index);    /* Get context from index */
  268         if (!CEntry) {                                                                                  /* Caller guarantees that this will work */
  269                 panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
  270                         act, index);
  271                 return KERN_FAILURE;
  272         }
  273 
  274         pmap_t  gpmap = vmm_get_adsp(act, index);                               /* Get guest pmap */
  275         if (!gpmap) {                                                                                   /* Caller guarantees that this will work */
  276                 panic("vmm_deactivate_gsa: Unexpected failure of vmm_get_adsp; act = %08X, idx = %d\n",
  277                         act, index);
  278                 return KERN_FAILURE;
  279         }
  280         
  281         gpmap->pmapFlags &= ~pmapVMgsaa;                                                /* Deactivate GSA for this guest */
  282         CEntry->vmmXAFlgs &= ~vmmGSA;                                                   /* Show GSA deactivated here, too */
  283 }
  284 
  285 
  286 /*-----------------------------------------------------------------------
  287 ** vmm_flush_context
  288 **
  289 ** Flush specified guest context, purging all guest mappings and clearing
  290 ** the context page.
  291 **
  292 -----------------------------------------------------------------------*/
static void vmm_flush_context(
	thread_t			act,
	vmm_thread_index_t	index)
{
	vmmCntrlEntry		*CEntry;
	vmmCntrlTable		*CTable;
	vmm_state_page_t	*vks;
	vmm_version_t		version;

	CEntry = vmm_get_entry(act, index);				/* Convert index to entry */
	if (!CEntry) {									/* Caller guarantees that this will work */
		panic("vmm_flush_context: Unexpected failure of vmm_get_entry; act = %08X, idx = %d\n",
			act, index);
		return;
	}

	/* Discard any facility (FPU/vector) state still held for this context
	   before tearing it down. */
	if(CEntry->vmmFacCtx.FPUsave) {					/* Is there any floating point context? */
		toss_live_fpu(&CEntry->vmmFacCtx);			/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.FPUsave);	/* Release it */
	}

	if(CEntry->vmmFacCtx.VMXsave) {					/* Is there any vector context? */
		toss_live_vec(&CEntry->vmmFacCtx);			/* Get rid of any live context here */
		save_release((savearea *)CEntry->vmmFacCtx.VMXsave);	/* Release it */
	}
	
	vmm_unmap_all_pages(act, index);				/* Blow away all mappings for this context */

	CTable = act->machine.vmmControl;				/* Get the control table address */
	CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp;	/* Make sure we don't try to automap into this */
	
	CEntry->vmmFlags &= vmmInUse;					/* Clear out all of the flags for this entry except in use */
	CEntry->vmmFacCtx.FPUsave = 0;					/* Clear facility context control */
	CEntry->vmmFacCtx.FPUlevel = 0;					/* Clear facility context control */
	CEntry->vmmFacCtx.FPUcpu = 0;					/* Clear facility context control */
	CEntry->vmmFacCtx.VMXsave = 0;					/* Clear facility context control */
	CEntry->vmmFacCtx.VMXlevel = 0;					/* Clear facility context control */
	CEntry->vmmFacCtx.VMXcpu = 0;					/* Clear facility context control */
	
	vks = CEntry->vmmContextKern;					/* Get address of the context page */
	version = vks->interface_version;				/* Save the version code */
	/* NOTE(review): hard-coded 4096 rather than PAGE_SIZE -- assumes the comm
	   area is exactly one 4KB page; confirm before any page-size change. */
	bzero((char *)vks, 4096);						/* Clear all */

	vks->interface_version = version;				/* Set our version code */
	/* NOTE(review): vmm_get_entry masks the index with '& vmmTInum', but '%'
	   (modulo) is used here -- these differ when index == vmmTInum; confirm
	   which is intended. */
	vks->thread_index = index % vmmTInum;			/* Tell the user the index for this virtual machine */
		
	return;											/* Context is now flushed */
}
  341 
  342 
  343 /*************************************************************************************
  344         Virtual Machine Monitor Exported Functionality
  345         
  346         The following routines are used to implement a quick-switch mechanism for
**      virtual machines that need to execute within their own processor environment
  348         (including register and MMU state).
  349 **************************************************************************************/
  350 
  351 /*-----------------------------------------------------------------------
  352 ** vmm_get_version
  353 **
  354 ** This function returns the current version of the virtual machine
  355 ** interface. It is divided into two portions. The top 16 bits
  356 ** represent the major version number, and the bottom 16 bits
  357 ** represent the minor version number. Clients using the Vmm
** functionality should make sure they are using a version new
  359 ** enough for them.
  360 **
  361 ** Inputs:
  362 **              none
  363 **
  364 ** Outputs:
  365 **              32-bit number representing major/minor version of 
  366 **                              the Vmm module
  367 -----------------------------------------------------------------------*/
  368 
  369 int vmm_get_version(struct savearea *save)
  370 {
  371         save->save_r3 = kVmmCurrentVersion;             /* Return the version */
  372         return 1;
  373 }
  374 
  375 
  376 /*-----------------------------------------------------------------------
  377 ** Vmm_get_features
  378 **
  379 ** This function returns a set of flags that represents the functionality
** supported by the current version of the Vmm interface. Clients should
  381 ** use this to determine whether they can run on this system.
  382 **
  383 ** Inputs:
  384 **              none
  385 **
  386 ** Outputs:
  387 **              32-bit number representing functionality supported by this
  388 **                              version of the Vmm module
  389 -----------------------------------------------------------------------*/
  390 
  391 int vmm_get_features(struct savearea *save)
  392 {
  393         save->save_r3 = kVmmCurrentFeatures;            /* Return the features */
  394         if(getPerProc()->pf.Available & pf64Bit) {
  395                 save->save_r3 &= ~kVmmFeature_LittleEndian;     /* No little endian here */
  396                 save->save_r3 |= kVmmFeature_SixtyFourBit;      /* Set that we can do 64-bit */
  397         }
  398         return 1;
  399 }
  400 
  401 
  402 /*-----------------------------------------------------------------------
  403 ** vmm_max_addr
  404 **
** This function returns the maximum addressable virtual address supported
  406 **
  407 ** Outputs:
  408 **              Returns max address
  409 -----------------------------------------------------------------------*/
  410 
  411 addr64_t vmm_max_addr(thread_t act) 
  412 {
  413         return vm_max_address;                                                  /* Return the maximum address */
  414 }
  415 
  416 /*-----------------------------------------------------------------------
  417 ** vmm_get_XA
  418 **
** This function retrieves the eXtended Architecture flags for the specified VM.
  420 ** 
  421 ** We need to return the result in the return code rather than in the return parameters
  422 ** because we need an architecture independent format so the results are actually 
  423 ** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
  424 ** 4 for 32-bit. 
  425 ** 
  426 **
  427 ** Inputs:
  428 **              act - pointer to current thread activation structure
  429 **              index - index returned by vmm_init_context
  430 **
  431 ** Outputs:
  432 **              Return code is set to the XA flags.  If the index is invalid or the
  433 **              context has not been created, we return 0.
  434 -----------------------------------------------------------------------*/
  435 
  436 unsigned int vmm_get_XA(
  437         thread_t                        act,
  438         vmm_thread_index_t      index)
  439 {
  440         vmmCntrlEntry           *CEntry;
  441 
  442         CEntry = vmm_get_entry(act, index);                             /* Convert index to entry */            
  443         if (CEntry == NULL) return 0;                                   /* Either this isn't a vmm or the index is bogus */
  444         
  445         return CEntry->vmmXAFlgs;                                               /* Return the flags */
  446 }
  447 
  448 /*-----------------------------------------------------------------------
  449 ** vmm_init_context
  450 **
  451 ** This function initializes an  emulation context. It allocates
  452 ** a new pmap (address space) and fills in the initial processor
  453 ** state within the specified structure. The structure, mapped
  454 ** into the client's logical address space, must be page-aligned.
  455 **
  456 ** Inputs:
  457 **              act - pointer to current thread activation
  458 **              version - requested version of the Vmm interface (allowing
  459 **                      future versions of the interface to change, but still
  460 **                      support older clients)
  461 **              vmm_user_state - pointer to a logical page within the
  462 **                      client's address space
  463 **
  464 ** Outputs:
  465 **              kernel return code indicating success or failure
  466 -----------------------------------------------------------------------*/
  467 
  468 int vmm_init_context(struct savearea *save)
  469 {
  470 
  471         thread_t                        act;
  472         vmm_version_t           version;
  473         vmm_state_page_t *      vmm_user_state;
  474         vmmCntrlTable           *CTable;
  475         vm_offset_t                     conkern;
  476         vmm_state_page_t *      vks;
  477         ppnum_t                         conphys;
  478         kern_return_t           ret;
  479         int                                     cvi, i;
  480     task_t                              task;
  481     thread_t                    fact, gact;
  482 
  483         vmm_user_state = CAST_DOWN(vmm_state_page_t *, save->save_r4);  /* Get the user address of the comm area */
  484         if ((unsigned int)vmm_user_state & (PAGE_SIZE - 1)) {   /* Make sure the comm area is page aligned */
  485                 save->save_r3 = KERN_FAILURE;                   /* Return failure */
  486                 return 1;
  487         }
  488 
  489         /* Make sure that the version requested is supported */
  490         version = save->save_r3;                                        /* Pick up passed in version */
  491         if (((version >> 16) < kVmmMinMajorVersion) || ((version >> 16) > (kVmmCurrentVersion >> 16))) {
  492                 save->save_r3 = KERN_FAILURE;                   /* Return failure */
  493                 return 1;
  494         }
  495 
  496         if((version & 0xFFFF) > kVmmCurMinorVersion) {  /* Check for valid minor */
  497                 save->save_r3 = KERN_FAILURE;                   /* Return failure */
  498                 return 1;
  499         }
  500 
  501         act = current_thread();                                         /* Pick up our activation */
  502         
  503         ml_set_interrupts_enabled(TRUE);                        /* This can take a bit of time so pass interruptions */
  504         
  505         task = current_task();                                          /* Figure out who we are */
  506 
  507         task_lock(task);                                                        /* Lock our task */
  508 
  509         fact = (thread_t)task->threads.next;    /* Get the first activation on task */
  510         gact = 0;                                                                       /* Pretend we didn't find it yet */
  511 
  512         for(i = 0; i < task->thread_count; i++) {       /* All of the activations */
  513                 if(fact->machine.vmmControl) {                          /* Is this a virtual machine monitor? */
  514                         gact = fact;                                            /* Yeah... */
  515                         break;                                                          /* Bail the loop... */
  516                 }
  517                 fact = (thread_t)fact->task_threads.next;       /* Go to the next one */
  518         }
  519         
  520 
  521 /*
  522  *      We only allow one thread per task to be a virtual machine monitor right now.  This solves
  523  *      a number of potential problems that I can't put my finger on right now.
  524  *
 *      Ultimately, I think we want to move the controls and make all this task based instead of
  526  *      thread based.  That would allow an emulator architecture to spawn a kernel thread for each
  527  *      VM (if they want) rather than hand dispatch contexts.
  528  */
  529 
  530         if(gact && (gact != act)) {                                     /* Check if another thread is a vmm or trying to be */
  531                 task_unlock(task);                                              /* Release task lock */
  532                 ml_set_interrupts_enabled(FALSE);               /* Set back interruptions */
  533                 save->save_r3 = KERN_FAILURE;                   /* We must play alone... */
  534                 return 1;
  535         }
  536         
  537         if(!gact) act->machine.vmmControl = (vmmCntrlTable *)1; /* Temporarily mark that we are the vmm thread */
  538 
  539         task_unlock(task);                                                      /* Safe to release now (because we've marked ourselves) */
  540 
  541         CTable = act->machine.vmmControl;                               /* Get the control table address */
  542         if ((unsigned int)CTable == 1) {                        /* If we are marked, try to allocate a new table, otherwise we have one */
  543                 if(!(CTable = (vmmCntrlTable *)kalloc(sizeof(vmmCntrlTable)))) {        /* Get a fresh emulation control table */
  544                         act->machine.vmmControl = 0;                    /* Unmark us as vmm 'cause we failed */
  545                         ml_set_interrupts_enabled(FALSE);       /* Set back interruptions */
  546                         save->save_r3 = KERN_RESOURCE_SHORTAGE;         /* No storage... */
  547                         return 1;
  548                 }
  549                 
  550                 bzero((void *)CTable, sizeof(vmmCntrlTable));   /* Clean it up */
  551                 act->machine.vmmControl = CTable;                       /* Initialize the table anchor */
  552         }
  553 
  554         for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {    /* Search to find a free slot */
  555                 if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) break;     /* Bail if we find an unused slot */
  556         }
  557         
  558         if(cvi >= kVmmMaxContexts) {                            /* Did we find one? */
  559                 ml_set_interrupts_enabled(FALSE);               /* Set back interruptions */
  560                 save->save_r3 = KERN_RESOURCE_SHORTAGE; /* No empty slots... */ 
  561                 return 1;
  562         }
  563 
  564         ret = vm_map_wire(                                                      /* Wire the virtual machine monitor's context area */
  565                 act->map,
  566                 (vm_offset_t)vmm_user_state,
  567                 (vm_offset_t)vmm_user_state + PAGE_SIZE,
  568                 VM_PROT_READ | VM_PROT_WRITE,
  569                 FALSE);                                                                                                                 
  570                 
  571         if (ret != KERN_SUCCESS)                                        /* The wire failed, return the code */
  572                 goto return_in_shame;
  573 
  574         /* Map the vmm state into the kernel's address space. */
  575         conphys = pmap_find_phys(act->map->pmap, (addr64_t)((uintptr_t)vmm_user_state));
  576 
  577         /* Find a virtual address to use. */
  578         ret = kmem_alloc_pageable(kernel_map, &conkern, PAGE_SIZE);
  579         if (ret != KERN_SUCCESS) {                                      /* Did we find an address? */
  580                 (void) vm_map_unwire(act->map,                  /* No, unwire the context area */
  581                         (vm_offset_t)vmm_user_state,
  582                         (vm_offset_t)vmm_user_state + PAGE_SIZE,
  583                         TRUE);
  584                 goto return_in_shame;
  585         }
  586         
  587         /* Map it into the kernel's address space. */
  588 
  589         pmap_enter(kernel_pmap, conkern, conphys, 
  590                 VM_PROT_READ | VM_PROT_WRITE, 
  591                 VM_WIMG_USE_DEFAULT, TRUE);
  592         
  593         /* Clear the vmm state structure. */
  594         vks = (vmm_state_page_t *)conkern;
  595         bzero((char *)vks, PAGE_SIZE);
  596         
  597         
  598         /* We're home free now. Simply fill in the necessary info and return. */
  599         
  600         vks->interface_version = version;                       /* Set our version code */
  601         vks->thread_index = cvi + 1;                            /* Tell the user the index for this virtual machine */
  602         
  603         CTable->vmmc[cvi].vmmFlags = vmmInUse;          /* Mark the slot in use and make sure the rest are clear */
  604         CTable->vmmc[cvi].vmmContextKern = vks;         /* Remember the kernel address of comm area */
  605         CTable->vmmc[cvi].vmmContextPhys = conphys;     /* Remember the state page physical addr */
  606         CTable->vmmc[cvi].vmmContextUser = vmm_user_state;              /* Remember user address of comm area */
  607         
  608         CTable->vmmc[cvi].vmmFacCtx.FPUsave = 0;        /* Clear facility context control */
  609         CTable->vmmc[cvi].vmmFacCtx.FPUlevel = 0;       /* Clear facility context control */
  610         CTable->vmmc[cvi].vmmFacCtx.FPUcpu = 0;         /* Clear facility context control */
  611         CTable->vmmc[cvi].vmmFacCtx.VMXsave = 0;        /* Clear facility context control */
  612         CTable->vmmc[cvi].vmmFacCtx.VMXlevel = 0;       /* Clear facility context control */
  613         CTable->vmmc[cvi].vmmFacCtx.VMXcpu = 0;         /* Clear facility context control */
  614         CTable->vmmc[cvi].vmmFacCtx.facAct = act;       /* Point back to the activation */
  615 
  616         hw_atomic_add((int *)&saveanchor.savetarget, 2);        /* Account for the number of extra saveareas we think we might "need" */
  617 
  618         pmap_t hpmap = act->map->pmap;                                          /* Get host pmap */
  619         pmap_t gpmap = pmap_create(0);                                          /* Make a fresh guest pmap */
  620         if (gpmap) {                                                                            /* Did we succeed ? */
  621                 CTable->vmmAdsp[cvi] = gpmap;                                   /* Remember guest pmap for new context */
  622                 if (lowGlo.lgVMMforcedFeats & vmmGSA) {                 /* Forcing on guest shadow assist ? */
  623                         vmm_activate_gsa(act, cvi+1);                           /* Activate GSA */ 
  624                 }
  625         } else {
  626                 ret = KERN_RESOURCE_SHORTAGE;                                   /* We've failed to allocate a guest pmap */
  627                 goto return_in_shame;                                                   /* Shame on us. */
  628         }
  629 
  630         if (!(hpmap->pmapFlags & pmapVMhost)) {                         /* Do this stuff if this is our first time hosting */
  631                 hpmap->pmapFlags |= pmapVMhost;                                 /* We're now hosting */
  632         }
  633         
  634         ml_set_interrupts_enabled(FALSE);                       /* Set back interruptions */
  635         save->save_r3 = KERN_SUCCESS;                           /* Hip, hip, horay... */        
  636         return 1;
  637 
  638 return_in_shame:
  639         if(!gact) kfree(CTable, sizeof(vmmCntrlTable)); /* Toss the table if we just allocated it */
  640         act->machine.vmmControl = 0;                                    /* Unmark us as vmm 'cause we failed */
  641         ml_set_interrupts_enabled(FALSE);                       /* Set back interruptions */
  642         save->save_r3 = ret;                                            /* Pass back return code... */  
  643         return 1;
  644 
  645 }
  646 
  647 
  648 /*-----------------------------------------------------------------------
  649 ** vmm_tear_down_context
  650 **
  651 ** This function uninitializes an emulation context. It deallocates
  652 ** internal resources associated with the context block.
  653 **
  654 ** Inputs:
  655 **              act - pointer to current thread activation structure
  656 **              index - index returned by vmm_init_context
  657 **
  658 ** Outputs:
  659 **              kernel return code indicating success or failure
  660 **
  661 ** Strangeness note:
  662 **              This call will also trash the address space with the same ID.  While this 
  663 **              is really not too cool, we have to do it because we need to make
  664 **              sure that old VMM users (not that we really have any) who depend upon 
  665 **              the address space going away with the context still work the same.
  666 -----------------------------------------------------------------------*/
  667 
  668 kern_return_t vmm_tear_down_context(
  669         thread_t                        act,
  670         vmm_thread_index_t      index)
  671 {
  672         vmmCntrlEntry           *CEntry;
  673         vmmCntrlTable           *CTable;
  674         int                                     cvi;
  675         register savearea       *sv;
  676 
  677         CEntry = vmm_get_entry(act, index);                                     /* Convert index to entry */            
  678         if (CEntry == NULL) return KERN_FAILURE;                        /* Either this isn't vmm thread or the index is bogus */
  679 
  680         ml_set_interrupts_enabled(TRUE);                                        /* This can take a bit of time so pass interruptions */
  681 
  682         hw_atomic_sub((int *)&saveanchor.savetarget, 2);        /* We don't need these extra saveareas anymore */
  683 
  684         if(CEntry->vmmFacCtx.FPUsave) {                                         /* Is there any floating point context? */
  685                 toss_live_fpu(&CEntry->vmmFacCtx);                              /* Get rid of any live context here */
  686                 save_release((savearea *)CEntry->vmmFacCtx.FPUsave);    /* Release it */
  687         }
  688 
  689         if(CEntry->vmmFacCtx.VMXsave) {                                         /* Is there any vector context? */
  690                 toss_live_vec(&CEntry->vmmFacCtx);                              /* Get rid of any live context here */
  691                 save_release((savearea *)CEntry->vmmFacCtx.VMXsave);    /* Release it */
  692         }
  693         
  694         CEntry->vmmPmap = 0;                                                            /* Remove this trace */
  695         pmap_t gpmap = act->machine.vmmControl->vmmAdsp[index - 1];
  696                                                                                                                 /* Get context's guest pmap (if any) */
  697         if (gpmap) {                                                                            /* Check if there is an address space assigned here */
  698                 if (gpmap->pmapFlags & pmapVMgsaa) {                    /* Handle guest shadow assist case specially */
  699                         hw_rem_all_gv(gpmap);                                           /* Remove all guest mappings from shadow hash table */
  700                 } else {
  701                         mapping_remove(gpmap, 0xFFFFFFFFFFFFF000LL);/* Remove final page explicitly because we might have mapped it */  
  702                         pmap_remove(gpmap, 0, 0xFFFFFFFFFFFFF000LL);/* Remove all entries from this map */
  703                 }
  704                 pmap_destroy(gpmap);                                                    /* Toss the pmap for this context */
  705                 act->machine.vmmControl->vmmAdsp[index - 1] = NULL;     /* Clean it up */
  706         }
  707         
  708         (void) vm_map_unwire(                                                   /* Unwire the user comm page */
  709                 act->map,
  710                 (vm_offset_t)CEntry->vmmContextUser,
  711                 (vm_offset_t)CEntry->vmmContextUser + PAGE_SIZE,
  712                 FALSE);
  713         
  714         kmem_free(kernel_map, (vm_offset_t)CEntry->vmmContextKern, PAGE_SIZE);  /* Remove kernel's view of the comm page */
  715         
  716         CTable = act->machine.vmmControl;                                       /* Get the control table address */
  717         CTable->vmmGFlags = CTable->vmmGFlags & ~vmmLastAdSp;   /* Make sure we don't try to automap into this */
  718 
  719         CEntry->vmmFlags = 0;                                                   /* Clear out all of the flags for this entry including in use */
  720         CEntry->vmmContextKern = 0;                                             /* Clear the kernel address of comm area */
  721         CEntry->vmmContextUser = 0;                                             /* Clear the user address of comm area */
  722         
  723         CEntry->vmmFacCtx.FPUsave = 0;                                  /* Clear facility context control */
  724         CEntry->vmmFacCtx.FPUlevel = 0;                                 /* Clear facility context control */
  725         CEntry->vmmFacCtx.FPUcpu = 0;                                   /* Clear facility context control */
  726         CEntry->vmmFacCtx.VMXsave = 0;                                  /* Clear facility context control */
  727         CEntry->vmmFacCtx.VMXlevel = 0;                                 /* Clear facility context control */
  728         CEntry->vmmFacCtx.VMXcpu = 0;                                   /* Clear facility context control */
  729         CEntry->vmmFacCtx.facAct = 0;                                   /* Clear facility context control */
  730         
  731         for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {    /* Search to find a free slot */
  732                 if(CTable->vmmc[cvi].vmmFlags & vmmInUse) {     /* Return if there are still some in use */
  733                         ml_set_interrupts_enabled(FALSE);               /* No more interruptions */
  734                         return KERN_SUCCESS;                                    /* Leave... */
  735                 }
  736         }
  737 
  738 /*
  739  *      When we have tossed the last context, toss any address spaces left over before releasing
  740  *      the VMM control block 
  741  */
  742 
  743         for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) {   /* Look at all slots */
  744                 if(!act->machine.vmmControl->vmmAdsp[index - 1]) continue;      /* Nothing to remove here */
  745                 mapping_remove(act->machine.vmmControl->vmmAdsp[index - 1], 0xFFFFFFFFFFFFF000LL);      /* Remove final page explicitly because we might have mapped it */      
  746                 pmap_remove(act->machine.vmmControl->vmmAdsp[index - 1], 0, 0xFFFFFFFFFFFFF000LL);      /* Remove all entries from this map */
  747                 pmap_destroy(act->machine.vmmControl->vmmAdsp[index - 1]);      /* Toss the pmap for this context */
  748                 act->machine.vmmControl->vmmAdsp[index - 1] = 0;        /* Clear just in case */
  749         }
  750         
  751         pmap_t pmap = act->map->pmap;                                   /* Get our pmap */
  752         if (pmap->pmapVmmExt) {                                                 /* Release any VMM pmap extension block and shadow hash table */
  753                 vmm_release_shadow_hash(pmap->pmapVmmExt);      /* Release extension block and shadow hash table */
  754                 pmap->pmapVmmExt     = 0;                                       /* Forget extension block */
  755                 pmap->pmapVmmExtPhys = 0;                                       /* Forget extension block's physical address, too */
  756         }
  757         pmap->pmapFlags &= ~pmapVMhost;                                 /* We're no longer hosting */
  758 
  759         kfree(CTable, sizeof(vmmCntrlTable));   /* Toss the table because to tossed the last context */
  760         act->machine.vmmControl = 0;                                            /* Unmark us as vmm */
  761 
  762         ml_set_interrupts_enabled(FALSE);                               /* No more interruptions */
  763         
  764         return KERN_SUCCESS;
  765 }
  766 
  767 
  768 /*-----------------------------------------------------------------------
  769 ** vmm_activate_XA
  770 **
** This function activates the eXtended Architecture flags for the specified VM.
  772 ** 
  773 ** We need to return the result in the return code rather than in the return parameters
  774 ** because we need an architecture independent format so the results are actually 
  775 ** usable by the host. For example, the return parameters for 64-bit are 8 bytes wide vs.
  776 ** 4 for 32-bit. 
  777 ** 
  778 ** Note that this function does a lot of the same stuff as vmm_tear_down_context
  779 ** and vmm_init_context.
  780 **
  781 ** Inputs:
  782 **              act - pointer to current thread activation structure
  783 **              index - index returned by vmm_init_context
  784 **              flags - the extended architecture flags
  785 **              
  786 **
  787 ** Outputs:
  788 **              KERN_SUCCESS if vm is valid and initialized. KERN_FAILURE if not.
  789 **              Also, the internal flags are set and, additionally, the VM is completely reset.
  790 -----------------------------------------------------------------------*/
  791 kern_return_t vmm_activate_XA(
  792         thread_t                        act,
  793         vmm_thread_index_t      index,
  794         unsigned int xaflags)
  795 {
  796         vmmCntrlEntry           *CEntry;
  797         kern_return_t           result  = KERN_SUCCESS;         /* Assume success */
  798 
  799         if ((xaflags & ~kVmmSupportedSetXA) || ((xaflags & vmm64Bit) && (!getPerProc()->pf.Available & pf64Bit)))
  800                 return (KERN_FAILURE);                                          /* Unknown or unsupported feature requested */
  801                 
  802         CEntry = vmm_get_entry(act, index);                             /* Convert index to entry */            
  803         if (CEntry == NULL) return KERN_FAILURE;                /* Either this isn't a vmm or the index is bogus */
  804 
  805         ml_set_interrupts_enabled(TRUE);                                /* This can take a bit of time so pass interruptions */
  806         
  807         vmm_flush_context(act, index);                                  /* Flush the context */
  808 
  809         if (xaflags & vmm64Bit) {                                               /* Activating 64-bit mode ? */  
  810                 CEntry->vmmXAFlgs |= vmm64Bit;                          /* Activate 64-bit mode */
  811         }
  812         
  813         if (xaflags & vmmGSA) {                                                 /* Activating guest shadow assist ? */
  814                 result = vmm_activate_gsa(act, index);          /* Activate guest shadow assist */
  815         }
  816         
  817         ml_set_interrupts_enabled(FALSE);                               /* No more interruptions */
  818         
  819         return result;                                                                  /* Return activate result */
  820 }
  821 
  822 /*-----------------------------------------------------------------------
  823 ** vmm_deactivate_XA
  824 **
  825 -----------------------------------------------------------------------*/
  826 kern_return_t vmm_deactivate_XA(
  827         thread_t                        act,
  828         vmm_thread_index_t      index,
  829         unsigned int xaflags)
  830 {
  831         vmmCntrlEntry           *CEntry;
  832         kern_return_t           result  = KERN_SUCCESS;         /* Assume success */
  833 
  834         if ((xaflags & ~kVmmSupportedSetXA) || ((xaflags & vmm64Bit) && (getPerProc()->pf.Available & pf64Bit)))
  835                 return (KERN_FAILURE);                                          /* Unknown or unsupported feature requested */
  836                 
  837         CEntry = vmm_get_entry(act, index);                             /* Convert index to entry */            
  838         if (CEntry == NULL) return KERN_FAILURE;                /* Either this isn't a vmm or the index is bogus */
  839 
  840         ml_set_interrupts_enabled(TRUE);                                /* This can take a bit of time so pass interruptions */
  841         
  842         vmm_flush_context(act, index);                                  /* Flush the context */
  843 
  844         if (xaflags & vmm64Bit) {                                               /* Deactivating 64-bit mode ? */        
  845                 CEntry->vmmXAFlgs &= ~vmm64Bit;                         /* Deactivate 64-bit mode */
  846         }
  847         
  848         if (xaflags & vmmGSA) {                                                 /* Deactivating guest shadow assist ? */
  849                 vmm_deactivate_gsa(act, index);                         /* Deactivate guest shadow assist */
  850         }
  851         
  852         ml_set_interrupts_enabled(FALSE);                               /* No more interruptions */
  853         
  854         return result;                                                                  /* Return deactivate result */
  855 }
  856 
  857 
  858 /*-----------------------------------------------------------------------
  859 ** vmm_tear_down_all
  860 **
  861 ** This function uninitializes all emulation contexts. If there are
  862 ** any vmm contexts, it calls vmm_tear_down_context for each one.
  863 **
  864 ** Note: this can also be called from normal thread termination.  Because of
** that, we will context switch out of an alternate if we are currently in it.
  866 ** It will be terminated with no valid return code set because we don't expect 
  867 ** the activation to ever run again.
  868 **
  869 ** Inputs:
  870 **              activation to tear down
  871 **
  872 ** Outputs:
  873 **              All vmm contexts released and VMM shut down
  874 -----------------------------------------------------------------------*/
  875 void vmm_tear_down_all(thread_t act) {
  876 
  877         vmmCntrlTable           *CTable;
  878         int                                     cvi;
  879         kern_return_t           ret;
  880         savearea                        *save;
  881         spl_t                           s;
  882         
  883         if(act->machine.specFlags & runningVM) {                        /* Are we actually in a context right now? */
  884                 save = find_user_regs(act);                                     /* Find the user state context */
  885                 if(!save) {                                                                     /* Did we find it? */
  886                         panic("vmm_tear_down_all: runningVM marked but no user state context\n");
  887                         return;
  888                 }
  889                 
  890                 save->save_exception = kVmmBogusContext*4;      /* Indicate that this context is bogus now */
  891                 s = splhigh();                                                          /* Make sure interrupts are off */
  892                 vmm_force_exit(act, save);                                      /* Force and exit from VM state */
  893                 splx(s);                                                                        /* Restore interrupts */
  894         }
  895         
  896         if(CTable = act->machine.vmmControl) {                          /* Do we have a vmm control block? */
  897 
  898 
  899                 for(cvi = 1; cvi <= kVmmMaxContexts; cvi++) {   /* Look at all slots */
  900                         if(CTable->vmmc[cvi - 1].vmmFlags & vmmInUse) { /* Is this one in use */
  901                                 ret = vmm_tear_down_context(act, cvi);  /* Take down the found context */
  902                                 if(ret != KERN_SUCCESS) {                       /* Did it go away? */
  903                                         panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %08X, cvi = %d\n",
  904                                           ret, act, cvi);
  905                                 }
  906                         }
  907                 }               
  908 
  909 /*
  910  *              Note that all address apces should be gone here.
  911  */
  912                 if(act->machine.vmmControl) {                                           /* Did we find one? */
  913                         panic("vmm_tear_down_all: control table did not get deallocated\n");    /* Table did not go away */
  914                 }
  915         }
  916 
  917         return;
  918 }
  919 
  920 /*-----------------------------------------------------------------------
  921 ** vmm_map_page
  922 **
  923 ** This function maps a page from within the client's logical
  924 ** address space into the alternate address space.
  925 **
  926 ** The page need not be locked or resident.  If not resident, it will be faulted
  927 ** in by this code, which may take some time.   Also, if the page is not locked,
  928 ** it, and this mapping may disappear at any time, even before it gets used.  Note also
  929 ** that reference and change information is NOT preserved when a page is unmapped, either
  930 ** explicitly or implicitly (e.g., a pageout, being unmapped in the non-alternate address
  931 ** space).  This means that if RC is needed, the page MUST be wired.
  932 **
  933 ** Note that if there is already a mapping at the address, it is removed and all 
  934 ** information (including RC) is lost BEFORE an attempt is made to map it. Also,
** if the map call fails, the old address is still unmapped.
  936 **
  937 ** Inputs:
  938 **              act   - pointer to current thread activation
  939 **              index - index of address space to map into
  940 **              va    - virtual address within the client's address
  941 **                          space
  942 **              ava   - virtual address within the alternate address
  943 **                          space
  944 **              prot - protection flags
  945 **
  946 **      Note that attempted mapping of areas in nested pmaps (shared libraries) or block mapped
  947 **  areas are not allowed and will fail. Same with directly mapped I/O areas.
  948 **
  949 ** Input conditions:
  950 **      Interrupts disabled (from fast trap)
  951 **
  952 ** Outputs:
  953 **              kernel return code indicating success or failure
  954 **      if success, va resident and alternate mapping made
  955 -----------------------------------------------------------------------*/
  956 
  957 kern_return_t vmm_map_page(
  958         thread_t                        act,
  959         vmm_adsp_id_t           index,
  960         addr64_t                        cva,
  961         addr64_t                        ava,
  962         vm_prot_t                       prot)
  963 {
  964         kern_return_t           ret;
  965         register mapping_t      *mp;
  966         vm_map_t                        map;
  967         addr64_t                        ova, nextva;
  968         pmap_t                          pmap;
  969 
  970         pmap = vmm_get_adsp(act, index);                        /* Get the guest pmap for this address space */
  971         if(!pmap) return KERN_FAILURE;                          /* Bogus address space, no VMs, or we can't make a pmap, failure... */
  972 
  973         if(ava > vm_max_address) return kVmmInvalidAddress;     /* Does the machine support an address of this size? */
  974 
  975         map = current_thread()->map;                            /* Get the host's map */
  976 
  977         if (pmap->pmapFlags & pmapVMgsaa) {                     /* Guest shadow assist active ? */
  978                 ret = hw_res_map_gv(map->pmap, pmap, cva, ava, getProtPPC(prot));
  979                                                                                                 /* Attempt to resume an existing gv->phys mapping */
  980                 if (mapRtOK != ret) {                                   /* Nothing to resume, construct a new mapping */
  981                         
  982                         while (1) {                                                     /* Find host mapping or fail */
  983                                 mp = mapping_find(map->pmap, cva, &nextva, 0);
  984                                                                                                 /* Attempt to find host mapping and pin it */
  985                                 if (mp) break;                                  /* Got it */
  986                                 
  987                                 ml_set_interrupts_enabled(TRUE);
  988                                                                                                 /* Open 'rupt window */
  989                                 ret = vm_fault(map,                             /* Didn't find it, try to fault in host page read/write */
  990                                         vm_map_trunc_page(cva), 
  991                                         VM_PROT_READ | VM_PROT_WRITE,
  992                                         FALSE, /* change wiring */
  993                                         THREAD_UNINT,
  994                                         NULL,
  995                                         0);
  996                                 ml_set_interrupts_enabled(FALSE);
  997                                                                                                 /* Close 'rupt window */
  998                                 if (ret != KERN_SUCCESS)
  999                                         return KERN_FAILURE;            /* Fault failed, return failure */
 1000                         }
 1001                         
 1002                         if (mpNormal != (mp->mpFlags & mpType)) {
 1003                                                                                                 /* Host mapping must be a vanilla page */
 1004                                 mapping_drop_busy(mp);                  /* Un-pin host mapping */
 1005                                 return KERN_FAILURE;                    /* Return failure */
 1006                         }
 1007         
 1008                                                                                                 /* Partially construct gv->phys mapping */
 1009                         unsigned int  pindex;
 1010                         phys_entry_t *physent = mapping_phys_lookup(mp->mpPAddr, &pindex);
 1011                         if (!physent) {
 1012                                 mapping_drop_busy(mp);
 1013                                 return KERN_FAILURE;
 1014                         }
 1015                         unsigned int pattr = ((physent->ppLink & (ppI | ppG)) >> 60);
 1016                         unsigned int wimg = 0x2;
 1017                         if (pattr & mmFlgCInhib)  wimg |= 0x4;
 1018                         if (pattr & mmFlgGuarded) wimg |= 0x1;
 1019                         unsigned int mflags = (pindex << 16) | mpGuest;
 1020                         addr64_t         gva = ((ava & ~mpHWFlags) | (wimg << 3) | getProtPPC(prot));
 1021                         
 1022                         hw_add_map_gv(map->pmap, pmap, gva, mflags, mp->mpPAddr);
 1023                                                                                                 /* Construct new guest->phys mapping */
 1024                         
 1025                         mapping_drop_busy(mp);                          /* Un-pin host mapping */
 1026                 }
 1027         } else {
 1028                 while(1) {                                                              /* Keep trying until we get it or until we fail */
 1029         
 1030                         mp = mapping_find(map->pmap, cva, &nextva, 0);  /* Find the mapping for this address */
 1031                         
 1032                         if(mp) break;                                           /* We found it */
 1033         
 1034                         ml_set_interrupts_enabled(TRUE);        /* Enable interruptions */
 1035                         ret = vm_fault(map,                                     /* Didn't find it, try to fault it in read/write... */
 1036                                         vm_map_trunc_page(cva), 
 1037                                         VM_PROT_READ | VM_PROT_WRITE,
 1038                                         FALSE, /*change wiring */
 1039                                         THREAD_UNINT,
 1040                                         NULL,
 1041                                         0);
 1042                         ml_set_interrupts_enabled(FALSE);       /* Disable interruptions */
 1043                         if (ret != KERN_SUCCESS) return KERN_FAILURE;   /* There isn't a page there, return... */
 1044                 }
 1045         
 1046                 if((mp->mpFlags & mpType) != mpNormal) {        /* If this is a block, a nest, or some other special thing, we can't map it */
 1047                         mapping_drop_busy(mp);                          /* We have everything we need from the mapping */
 1048                         return KERN_FAILURE;                            /* Leave in shame */
 1049                 }
 1050                 
 1051                 while(1) {                                                              /* Keep trying the enter until it goes in */
 1052                         ova = mapping_make(pmap, ava, mp->mpPAddr, 0, 1, prot); /* Enter the mapping into the pmap */
 1053                         if(!ova) break;                                         /* If there were no collisions, we are done... */
 1054                         mapping_remove(pmap, ova);                      /* Remove the mapping that collided */
 1055                 }
 1056         
 1057                 mapping_drop_busy(mp);                                  /* We have everything we need from the mapping */
 1058         }
 1059 
 1060         if (!((getPerProc()->spcFlags) & FamVMmode)) {
 1061                 act->machine.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL;      /* Remember the last mapping we made */
 1062                 act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | index;       /* Remember last address space */
 1063         }
 1064 
 1065         return KERN_SUCCESS;
 1066 }
 1067 
 1068 
 1069 /*-----------------------------------------------------------------------
 1070 ** vmm_map_execute
 1071 **
 1072 ** This function maps a page from within the client's logical
 1073 ** address space into the alternate address space of the
 1074 ** Virtual Machine Monitor context and then directly starts executing.
 1075 **
 1076 **      See description of vmm_map_page for details. 
 1077 **
 1078 ** Inputs:
 1079 **              Index is used for both the context and the address space ID.
 1080 **              index[24:31] is the context id and index[16:23] is the address space.
 1081 **              if the address space ID is 0, the context ID is used for it.
 1082 **
 1083 ** Outputs:
 1084 **              Normal exit is to run the VM.  Abnormal exit is triggered via a 
 1085 **              non-KERN_SUCCESS return from vmm_map_page or later during the 
 1086 **              attempt to transition into the VM. 
 1087 -----------------------------------------------------------------------*/
 1088 
 1089 vmm_return_code_t vmm_map_execute(
 1090         thread_t                        act,
 1091         vmm_thread_index_t      index,
 1092         addr64_t                        cva,
 1093         addr64_t                        ava,
 1094         vm_prot_t                       prot)
 1095 {
 1096         kern_return_t           ret;
 1097         vmmCntrlEntry           *CEntry;
 1098         unsigned int            adsp;
 1099         vmm_thread_index_t      cndx;
 1100 
 1101         cndx = index & 0xFF;                                                    /* Clean it up */
 1102 
 1103         CEntry = vmm_get_entry(act, cndx);                              /* Get and validate the index */
 1104         if (CEntry == NULL) return kVmmBogusContext;    /* Return bogus context */
 1105         
 1106         if (((getPerProc()->spcFlags) & FamVMmode) && (CEntry != act->machine.vmmCEntry))
 1107                 return kVmmBogusContext;                        /* Yes, invalid index in Fam */
 1108         
 1109         adsp = (index >> 8) & 0xFF;                                             /* Get any requested address space */
 1110         if(!adsp) adsp = (index & 0xFF);                                /* If 0, use context ID as address space ID */
 1111         
 1112         ret = vmm_map_page(act, adsp, cva, ava, prot);  /* Go try to map the page on in */
 1113         
 1114         
 1115         if(ret == KERN_SUCCESS) {
 1116                 act->machine.vmmControl->vmmLastMap = ava & 0xFFFFFFFFFFFFF000ULL;      /* Remember the last mapping we made */
 1117                 act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx;        /* Remember last address space */
 1118                 vmm_execute_vm(act, cndx);                              /* Return was ok, launch the VM */
 1119         }
 1120         
 1121         return ret;                                                                             /* We had trouble mapping in the page */        
 1122         
 1123 }
 1124 
 1125 /*-----------------------------------------------------------------------
 1126 ** vmm_map_list
 1127 **
 1128 ** This function maps a list of pages into various address spaces
 1129 **
 1130 ** Inputs:
 1131 **              act   - pointer to current thread activation
 1132 **              index - index of default address space (used if not specified in list entry)
 1133 **              count - number of pages to release
 1134 **              flavor - 0 if 32-bit version, 1 if 64-bit
 1135 **              vmcpComm in the comm page contains up to kVmmMaxMapPages to map
 1136 **
 1137 ** Outputs:
 1138 **              kernel return code indicating success or failure
 1139 **              KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded
 1140 **              or the vmm_map_page call fails.
 1141 **              We return kVmmInvalidAddress if virtual address size is not supported
 1142 -----------------------------------------------------------------------*/
 1143 
 1144 kern_return_t vmm_map_list(
 1145         thread_t                        act,
 1146         vmm_adsp_id_t           index,
 1147         unsigned int            cnt,
 1148         unsigned int            flavor)
 1149 {
 1150         vmmCntrlEntry           *CEntry;
 1151         boolean_t                       ret;
 1152         unsigned int            i;
 1153         vmmMList                        *lst;
 1154         vmmMList64                      *lstx;
 1155         addr64_t                        cva;
 1156         addr64_t                        ava;
 1157         vm_prot_t                       prot;
 1158         vmm_adsp_id_t           adsp;
 1159 
 1160         CEntry = vmm_get_entry(act, index);                             /* Convert index to entry */            
 1161         if (CEntry == NULL) return KERN_FAILURE;                /* Either this isn't a vmm or the index is bogus */
 1162         
 1163         if(cnt > kVmmMaxMapPages) return KERN_FAILURE;  /* They tried to map too many */
 1164         if(!cnt) return KERN_SUCCESS;                                   /* If they said none, we're done... */
 1165         
 1166         lst = (vmmMList *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];    /* Point to the first entry */
 1167         lstx = (vmmMList64 *)&((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0]; /* Point to the first entry */
 1168         
 1169         for(i = 0; i < cnt; i++) {                                              /* Step and release all pages in list */
 1170                 if(flavor) {                                                            /* Check if 32- or 64-bit addresses */
 1171                         cva = lstx[i].vmlva;                                    /* Get the 64-bit actual address */     
 1172                         ava = lstx[i].vmlava;                                   /* Get the 64-bit guest address */      
 1173                 }
 1174                 else {
 1175                         cva = lst[i].vmlva;                                             /* Get the 32-bit actual address */     
 1176                         ava = lst[i].vmlava;                                    /* Get the 32-bit guest address */      
 1177                 }
 1178 
 1179                 prot = ava & vmmlProt;                                          /* Extract the protection bits */       
 1180                 adsp = (ava & vmmlAdID) >> 4;                           /* Extract an explicit address space request */ 
 1181                 if(!adsp) adsp = index - 1;                                     /* If no explicit, use supplied default */
 1182                 ava = ava &= 0xFFFFFFFFFFFFF000ULL;                     /* Clean up the address */
 1183                 
 1184                 ret = vmm_map_page(act, index, cva, ava, prot); /* Go try to map the page on in */
 1185                 if(ret != KERN_SUCCESS) return ret;                     /* Bail if any error */
 1186         }
 1187         
 1188         return KERN_SUCCESS     ;                                                       /* Return... */
 1189 }
 1190 
 1191 /*-----------------------------------------------------------------------
 1192 ** vmm_get_page_mapping
 1193 **
 1194 ** Given a context index and a guest virtual address, convert the address
 1195 ** to its corresponding host virtual address.
 1196 **
 1197 ** Inputs:
 1198 **              act   - pointer to current thread activation
 1199 **              index - context index
 1200 **              gva   - guest virtual address 
 1201 **
 1202 ** Outputs:
 1203 **              Host virtual address (page aligned) or -1 if not mapped or any failure
 1204 **
 1205 ** Note:
 1206 **              If the host address space contains multiple virtual addresses mapping
 1207 **              to the physical address corresponding to the specified guest virtual
 1208 **              address (i.e., host virtual aliases), it is unpredictable which host
 1209 **              virtual address (alias) will be returned. Moral of the story: No host
 1210 **              virtual aliases.
 1211 -----------------------------------------------------------------------*/
 1212 
 1213 addr64_t vmm_get_page_mapping(
 1214         thread_t                        act,
 1215         vmm_adsp_id_t           index,
 1216         addr64_t                        gva)
 1217 {
 1218         register mapping_t      *mp;
 1219         pmap_t                          pmap;
 1220         addr64_t                        nextva, hva;
 1221         ppnum_t                         pa;
 1222 
 1223         pmap = vmm_get_adsp(act, index);                                /* Get and validate the index */
 1224         if (!pmap)return -1;                                                    /* No good, failure... */
 1225         
 1226         if (pmap->pmapFlags & pmapVMgsaa) {                             /* Guest shadow assist (GSA) active ? */
 1227                 return (hw_gva_to_hva(pmap, gva));                      /* Convert guest to host virtual address */                     
 1228         } else {
 1229                 mp = mapping_find(pmap, gva, &nextva, 0);       /* Find guest mapping for this virtual address */
 1230         
 1231                 if(!mp) return -1;                                                      /* Not mapped, return -1 */
 1232 
 1233                 pa = mp->mpPAddr;                                                       /* Remember the physical page address */
 1234 
 1235                 mapping_drop_busy(mp);                                          /* Go ahead and relase the mapping now */
 1236         
 1237                 pmap = current_thread()->map->pmap;                     /* Get the host pmap */
 1238                 hva = mapping_p2v(pmap, pa);                            /* Now find the source virtual */
 1239 
 1240                 if(hva != 0) return hva;                                        /* We found it... */
 1241         
 1242                 panic("vmm_get_page_mapping: could not back-map guest va (%016llX)\n", gva);
 1243                                                                                                         /* We are bad wrong if we can't find it */
 1244 
 1245                 return -1;                                                                      /* Never executed, prevents compiler warning */
 1246         }
 1247 }
 1248 
 1249 /*-----------------------------------------------------------------------
 1250 ** vmm_unmap_page
 1251 **
 1252 ** This function unmaps a page from the guest address space.
 1253 **
 1254 ** Inputs:
 1255 **              act   - pointer to current thread activation
 1256 **              index - index of vmm state for this page
 1257 **              va    - virtual address within the vmm's address
 1258 **                          space
 1259 **
 1260 ** Outputs:
 1261 **              kernel return code indicating success or failure
 1262 -----------------------------------------------------------------------*/
 1263 
 1264 kern_return_t vmm_unmap_page(
 1265         thread_t                        act,
 1266         vmm_adsp_id_t           index,
 1267         addr64_t                        va)
 1268 {
 1269         vmmCntrlEntry           *CEntry;
 1270         addr64_t                        nadd;
 1271         pmap_t                          pmap;
 1272 
 1273         pmap = vmm_get_adsp(act, index);                                                /* Get and validate the index */
 1274         if (!pmap)return -1;                                                                    /* No good, failure... */
 1275         
 1276         if (pmap->pmapFlags & pmapVMgsaa) {                                             /* Handle guest shadow assist specially */
 1277                 hw_susp_map_gv(act->map->pmap, pmap, va);                       /* Suspend the mapping */
 1278                 return (KERN_SUCCESS);                                                          /* Always returns success */
 1279         } else {
 1280                 nadd = mapping_remove(pmap, va);                                        /* Toss the mapping */
 1281                 
 1282                 return ((nadd & 1) ? KERN_FAILURE : KERN_SUCCESS);      /* Return... */
 1283         }
 1284 }
 1285 
 1286 /*-----------------------------------------------------------------------
 1287 ** vmm_unmap_list
 1288 **
 1289 ** This function unmaps a list of pages from the alternate's logical
 1290 ** address space.
 1291 **
 1292 ** Inputs:
 1293 **              act   - pointer to current thread activation
 1294 **              index - index of vmm state for this page
 1295 **              count - number of pages to release
 1296 **              flavor - 0 if 32-bit, 1 if 64-bit
 1297 **              vmcpComm in the comm page contains up to kVmmMaxUnmapPages to unmap
 1298 **
 1299 ** Outputs:
 1300 **              kernel return code indicating success or failure
 1301 **              KERN_FAILURE is returned if kVmmMaxUnmapPages is exceeded
 1302 -----------------------------------------------------------------------*/
 1303 
 1304 kern_return_t vmm_unmap_list(
 1305         thread_t                        act,
 1306         vmm_adsp_id_t           index,
 1307         unsigned int            cnt,
 1308         unsigned int            flavor)
 1309 {
 1310         vmmCntrlEntry           *CEntry;
 1311         boolean_t                       ret;
 1312         kern_return_t           kern_result = KERN_SUCCESS;
 1313         unsigned int            *pgaddr, i;
 1314         addr64_t                        gva;
 1315         vmmUMList                       *lst;
 1316         vmmUMList64                     *lstx;
 1317         pmap_t                          pmap;
 1318         int                                     adsp;
 1319 
 1320         CEntry = vmm_get_entry(act, index);                             /* Convert index to entry */            
 1321         if (CEntry == NULL) return KERN_FAILURE;                /* Either this isn't a vmm or the index is bogus */
 1322         
 1323         if(cnt > kVmmMaxUnmapPages) return KERN_FAILURE;        /* They tried to unmap too many */
 1324         if(!cnt) return KERN_SUCCESS;                                   /* If they said none, we're done... */
 1325         
 1326         lst = (vmmUMList *)lstx = (vmmUMList64 *) &((vmm_comm_page_t *)CEntry->vmmContextKern)->vmcpComm[0];    /* Point to the first entry */
 1327         
 1328         for(i = 0; i < cnt; i++) {                                              /* Step and release all pages in list */
 1329                 if(flavor) {                                                            /* Check if 32- or 64-bit addresses */
 1330                         gva = lstx[i].vmlava;                                   /* Get the 64-bit guest address */      
 1331                 }
 1332                 else {
 1333                         gva = lst[i].vmlava;                                    /* Get the 32-bit guest address */      
 1334                 }
 1335 
 1336                 adsp = (gva & vmmlAdID) >> 4;                           /* Extract an explicit address space request */ 
 1337                 if(!adsp) adsp = index - 1;                                     /* If no explicit, use supplied default */
 1338                 pmap = act->machine.vmmControl->vmmAdsp[adsp];  /* Get the pmap for this request */
 1339                 if(!pmap) continue;                                                     /* Ain't nuthin' mapped here, no durn map... */
 1340 
 1341                 gva = gva &= 0xFFFFFFFFFFFFF000ULL;                     /* Clean up the address */      
 1342                 if (pmap->pmapFlags & pmapVMgsaa) {                     /* Handle guest shadow assist specially */
 1343                         hw_susp_map_gv(act->map->pmap, pmap, gva);
 1344                                                                                                         /* Suspend the mapping */
 1345                 } else {
 1346                         (void)mapping_remove(pmap, gva);                /* Toss the mapping */
 1347                 }
 1348         }
 1349         
 1350         return KERN_SUCCESS     ;                                                       /* Return... */
 1351 }
 1352 
 1353 /*-----------------------------------------------------------------------
 1354 ** vmm_unmap_all_pages
 1355 **
 1356 ** This function unmaps all pages from the alternate's logical
 1357 ** address space.
 1358 **
 1359 ** Inputs:
 1360 **              act   - pointer to current thread activation
 1361 **              index - index of context state
 1362 **
 1363 ** Outputs:
 1364 **              none
 1365 **
 1366 ** Note:
 1367 **      All pages are unmapped, but the address space (i.e., pmap) is still alive
 1368 -----------------------------------------------------------------------*/
 1369 
 1370 void vmm_unmap_all_pages(
 1371         thread_t                        act,
 1372         vmm_adsp_id_t           index)
 1373 {
 1374         vmmCntrlEntry           *CEntry;
 1375         pmap_t                          pmap;
 1376 
 1377         pmap = vmm_get_adsp(act, index);                                                /* Convert index to entry */            
 1378         if (!pmap) return;                                                                              /* Either this isn't vmm thread or the index is bogus */
 1379 
 1380         if (pmap->pmapFlags & pmapVMgsaa) {                                             /* Handle guest shadow assist specially */
 1381                 hw_rem_all_gv(pmap);                                                            /* Remove all guest's mappings from shadow hash table */
 1382         } else {
 1383                 /*
 1384                  *      Note: the pmap code won't deal with the last page in the address space, so handle it explicitly
 1385                  */
 1386                 mapping_remove(pmap, 0xFFFFFFFFFFFFF000LL);                     /* Remove final page explicitly because we might have mapped it */      
 1387                 pmap_remove(pmap, 0, 0xFFFFFFFFFFFFF000LL);                     /* Remove all entries from this map */
 1388         }
 1389         return;
 1390 }
 1391 
 1392 
 1393 /*-----------------------------------------------------------------------
 1394 ** vmm_get_page_dirty_flag
 1395 **
 1396 ** This function returns the changed flag of the page
 1397 ** and optionally clears the flag.
 1398 **
 1399 ** Inputs:
 1400 **              act   - pointer to current thread activation
 1401 **              index - index of vmm state for this page
 1402 **              va    - virtual address within the vmm's address
 1403 **                          space
 1404 **              reset - Clears dirty if true, untouched if not
 1405 **
 1406 ** Outputs:
 1407 **              the dirty bit
 1408 **              clears the dirty bit in the pte if requested
 1409 **
 1410 **      Note:
 1411 **              The RC bits are merged into the global physical entry
 1412 -----------------------------------------------------------------------*/
 1413 
 1414 boolean_t vmm_get_page_dirty_flag(
 1415         thread_t                        act,
 1416         vmm_adsp_id_t           index,
 1417         addr64_t                        va,
 1418         unsigned int            reset)
 1419 {
 1420         vmmCntrlEntry           *CEntry;
 1421         register mapping_t      *mpv, *mp;
 1422         unsigned int            RC;
 1423         pmap_t                          pmap;
 1424 
 1425         pmap = vmm_get_adsp(act, index);                                                /* Convert index to entry */            
 1426         if (!pmap) return 1;                                                                    /* Either this isn't vmm thread or the index is bogus */
 1427 
 1428         if (pmap->pmapFlags & pmapVMgsaa) {                                             /* Handle guest shadow assist specially */
 1429                 RC = hw_test_rc_gv(act->map->pmap, pmap, va, reset);/* Fetch the RC bits and clear if requested */      
 1430         } else {
 1431                 RC = hw_test_rc(pmap, (addr64_t)va, reset);                     /* Fetch the RC bits and clear if requested */
 1432         }
 1433 
 1434         switch (RC & mapRetCode) {                                                              /* Decode return code */
 1435         
 1436                 case mapRtOK:                                                                           /* Changed */
 1437                         return ((RC & (unsigned int)mpC) == (unsigned int)mpC); /* Return if dirty or not */
 1438                         break;
 1439         
 1440                 case mapRtNotFnd:                                                                       /* Didn't find it */
 1441                         return 1;                                                                               /* Return dirty */
 1442                         break;
 1443                         
 1444                 default:
 1445                         panic("vmm_get_page_dirty_flag: hw_test_rc failed - rc = %d, pmap = %08X, va = %016llX\n", RC, pmap, va);
 1446                 
 1447         }
 1448 
 1449         return 1;                                                                                               /* Return the change bit */
 1450 }
 1451 
 1452 
 1453 /*-----------------------------------------------------------------------
 1454 ** vmm_protect_page
 1455 **
 1456 ** This function sets the protection bits of a mapped page
 1457 **
 1458 ** Inputs:
 1459 **              act   - pointer to current thread activation
 1460 **              index - index of vmm state for this page
 1461 **              va    - virtual address within the vmm's address
 1462 **                          space
 1463 **              prot  - Protection flags
 1464 **
 1465 ** Outputs:
 1466 **              none
 1467 **              Protection bits of the mapping are modified
 1468 **
 1469 -----------------------------------------------------------------------*/
 1470 
 1471 kern_return_t vmm_protect_page(
 1472         thread_t                        act,
 1473         vmm_adsp_id_t           index,
 1474         addr64_t                        va,
 1475         vm_prot_t                       prot)
 1476 {
 1477         vmmCntrlEntry           *CEntry;
 1478         addr64_t                        nextva;
 1479         int     ret;
 1480         pmap_t                          pmap;
 1481 
 1482         pmap = vmm_get_adsp(act, index);                                                /* Convert index to entry */            
 1483         if (!pmap) return KERN_FAILURE;                                                 /* Either this isn't vmm thread or the index is bogus */
 1484         
 1485         if (pmap->pmapFlags & pmapVMgsaa) {                                             /* Handle guest shadow assist specially */
 1486                 ret = hw_protect_gv(pmap, va, prot);                            /* Try to change protection, GSA varient */
 1487         } else {
 1488                 ret = hw_protect(pmap, va, prot, &nextva);                      /* Try to change protection */
 1489         }
 1490 
 1491         switch (ret) {                                                                                  /* Decode return code */
 1492         
 1493                 case mapRtOK:                                                                           /* All ok... */
 1494                         break;                                                                                  /* Outta here */
 1495                         
 1496                 case mapRtNotFnd:                                                                       /* Didn't find it */
 1497                         return KERN_SUCCESS;                                                    /* Ok, return... */
 1498                         break;
 1499                         
 1500                 default:
 1501                         panic("vmm_protect_page: hw_protect failed - rc = %d, pmap = %08X, va = %016llX\n", ret, pmap, (addr64_t)va);
 1502                 
 1503         }
 1504 
 1505         if (!((getPerProc()->spcFlags) & FamVMmode)) {
 1506                 act->machine.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL;       /* Remember the last mapping we made */
 1507                 act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | index;       /* Remember last address space */
 1508         }
 1509 
 1510         return KERN_SUCCESS;                                                                    /* Return */
 1511 }
 1512 
 1513 
 1514 /*-----------------------------------------------------------------------
 1515 ** vmm_protect_execute
 1516 **
 1517 ** This function sets the protection bits of a mapped page
 1518 ** and then directly starts executing.
 1519 **
 1520 **      See description of vmm_protect_page for details
 1521 **
 1522 ** Inputs:
 1523 **              See vmm_protect_page and vmm_map_execute
 1524 **
 1525 ** Outputs:
 1526 **              Normal exit is to run the VM.  Abnormal exit is triggered via a 
 1527 **              non-KERN_SUCCESS return from vmm_map_page or later during the 
 1528 **              attempt to transition into the VM. 
 1529 -----------------------------------------------------------------------*/
 1530 
 1531 vmm_return_code_t vmm_protect_execute(
 1532         thread_t                        act,
 1533         vmm_thread_index_t      index,
 1534         addr64_t                        va,
 1535         vm_prot_t                       prot)
 1536 {
 1537         kern_return_t           ret;
 1538         vmmCntrlEntry           *CEntry;
 1539         unsigned int            adsp;
 1540         vmm_thread_index_t      cndx;
 1541 
 1542         cndx = index & 0xFF;                                                    /* Clean it up */
 1543         CEntry = vmm_get_entry(act, cndx);                              /* Get and validate the index */
 1544         if (CEntry == NULL) return kVmmBogusContext;    /* Return bogus context */
 1545         
 1546         adsp = (index >> 8) & 0xFF;                                             /* Get any requested address space */
 1547         if(!adsp) adsp = (index & 0xFF);                                /* If 0, use context ID as address space ID */
 1548         
 1549         if (((getPerProc()->spcFlags) & FamVMmode) && (CEntry != act->machine.vmmCEntry))
 1550                 return kVmmBogusContext;                        /* Yes, invalid index in Fam */
 1551         
 1552         ret = vmm_protect_page(act, adsp, va, prot);    /* Go try to change access */
 1553         
 1554         if(ret == KERN_SUCCESS) {
 1555                 act->machine.vmmControl->vmmLastMap = va & 0xFFFFFFFFFFFFF000ULL;       /* Remember the last mapping we made */
 1556                 act->machine.vmmControl->vmmGFlags = (act->machine.vmmControl->vmmGFlags & ~vmmLastAdSp) | cndx;        /* Remember last address space */
 1557                 vmm_execute_vm(act, cndx);      /* Return was ok, launch the VM */
 1558         }
 1559         
 1560         return ret;                                                                             /* We had trouble of some kind (shouldn't happen) */    
 1561         
 1562 }
 1563 
 1564 
 1565 /*-----------------------------------------------------------------------
 1566 ** vmm_get_float_state
 1567 **
 1568 ** This function causes the current floating point state to 
 1569 ** be saved into the shared context area.  It also clears the
 1570 ** vmmFloatCngd changed flag.
 1571 **
 1572 ** Inputs:
 1573 **              act - pointer to current thread activation structure
 1574 **              index - index returned by vmm_init_context
 1575 **
 1576 ** Outputs:
 1577 **              context saved
 1578 -----------------------------------------------------------------------*/
 1579 
 1580 kern_return_t vmm_get_float_state(
 1581         thread_t                        act,
 1582         vmm_thread_index_t      index)
 1583 {
 1584         vmmCntrlEntry           *CEntry;
 1585         vmmCntrlTable           *CTable;
 1586         int                                     i;
 1587         register struct savearea_fpu *sv;
 1588 
 1589         CEntry = vmm_get_entry(act, index);                             /* Convert index to entry */            
 1590         if (CEntry == NULL) return KERN_FAILURE;                /* Either this isn't vmm thread or the index is bogus */
 1591         
 1592         act->machine.specFlags &= ~floatCng;                            /* Clear the special flag */
 1593         CEntry->vmmContextKern->vmmStat &= ~vmmFloatCngd;       /* Clear the change indication */
 1594 
 1595         fpu_save(&CEntry->vmmFacCtx);                                   /* Save context if live */
 1596 
 1597         if(sv = CEntry->vmmFacCtx.FPUsave) {                    /* Is there context yet? */
 1598                 bcopy((char *)&sv->save_fp0, (char *)&(CEntry->vmmContextKern->vmm_proc_state.ppcFPRs), 32 * 8); /* 32 registers */
 1599                 return KERN_SUCCESS;
 1600         }
 1601 
 1602 
 1603         for(i = 0; i < 32; i++) {                                               /* Initialize floating points */
 1604                 CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[i].d = FloatInit;        /* Initial value */
 1605         }
 1606 
 1607         return KERN_SUCCESS;
 1608 }
 1609 
 1610 /*-----------------------------------------------------------------------
 1611 ** vmm_get_vector_state
 1612 **
 1613 ** This function causes the current vector state to 
 1614 ** be saved into the shared context area.  It also clears the
 1615 ** vmmVectorCngd changed flag.
 1616 **
 1617 ** Inputs:
 1618 **              act - pointer to current thread activation structure
 1619 **              index - index returned by vmm_init_context
 1620 **
 1621 ** Outputs:
 1622 **              context saved
 1623 -----------------------------------------------------------------------*/
 1624 
 1625 kern_return_t vmm_get_vector_state(
 1626         thread_t                        act,
 1627         vmm_thread_index_t      index)
 1628 {
 1629         vmmCntrlEntry           *CEntry;
 1630         vmmCntrlTable           *CTable;
 1631         int                                     i, j;
 1632         unsigned int            vrvalidwrk;
 1633         register struct savearea_vec *sv;
 1634 
 1635         CEntry = vmm_get_entry(act, index);                             /* Convert index to entry */            
 1636         if (CEntry == NULL) return KERN_FAILURE;                /* Either this isn't vmm thread or the index is bogus */
 1637 
 1638         vec_save(&CEntry->vmmFacCtx);                                   /* Save context if live */
 1639         
 1640         act->machine.specFlags &= ~vectorCng;                           /* Clear the special flag */
 1641         CEntry->vmmContextKern->vmmStat &= ~vmmVectCngd;        /* Clear the change indication */
 1642         
 1643         if(sv = CEntry->vmmFacCtx.VMXsave) {                    /* Is there context yet? */
 1644 
 1645                 vrvalidwrk = sv->save_vrvalid;                          /* Get the valid flags */
 1646 
 1647                 for(i = 0; i < 32; i++) {                                       /* Copy the saved registers and invalidate the others */
 1648                         if(vrvalidwrk & 0x80000000) {                   /* Do we have a valid value here? */
 1649                                 for(j = 0; j < 4; j++) {                        /* If so, copy it over */
 1650                                         CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = ((unsigned int *)&(sv->save_vr0))[(i * 4) + j];
 1651                                 }
 1652                         }
 1653                         else {
 1654                                 for(j = 0; j < 4; j++) {                        /* Otherwise set to empty value */
 1655                                         CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];
 1656                                 }
 1657                         }
 1658                         
 1659                         vrvalidwrk = vrvalidwrk << 1;                   /* Shift over to the next */
 1660                         
 1661                 }
 1662 
 1663                 return KERN_SUCCESS;
 1664         }
 1665 
 1666         for(i = 0; i < 32; i++) {                                               /* Initialize vector registers */
 1667                 for(j=0; j < 4; j++) {                                          /* Do words */
 1668                         CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];               /* Initial value */
 1669                 }
 1670         }
 1671 
 1672         return KERN_SUCCESS;
 1673 }
 1674 
 1675 /*-----------------------------------------------------------------------
 1676 ** vmm_set_timer
 1677 **
 1678 ** This function causes a timer (in AbsoluteTime) for a specific time
 1679 ** to be set.  It also clears the vmmTimerPop flag if the timer is actually 
 1680 ** set; it is cleared otherwise.
 1681 **
 1682 ** A timer is cleared by setting the time to 0. This will clear
 1683 ** the vmmTimerPop bit. Simply setting the timer to earlier than the
 1684 ** current time clears the internal timer request, but leaves the
 1685 ** vmmTimerPop flag set.
 1686 ** 
 1687 **
 1688 ** Inputs:
 1689 **              act - pointer to current thread activation structure
 1690 **              index - index returned by vmm_init_context
 1691 **              timerhi - high order word of AbsoluteTime to pop
 1692 **              timerlo - low order word of AbsoluteTime to pop
 1693 **
 1694 ** Outputs:
 1695 **              timer set, vmmTimerPop cleared
 1696 -----------------------------------------------------------------------*/
 1697 
 1698 kern_return_t vmm_set_timer(
 1699         thread_t                        act,
 1700         vmm_thread_index_t      index,
 1701         unsigned int            timerhi, 
 1702         unsigned int            timerlo)
 1703 {
 1704         vmmCntrlEntry           *CEntry;
 1705                 
 1706         CEntry = vmm_get_entry(act, index);                             /* Convert index to entry */            
 1707         if (CEntry == NULL) return KERN_FAILURE;                /* Either this isn't vmm thread or the index is bogus */
 1708         
 1709         CEntry->vmmTimer = ((uint64_t)timerhi << 32) | timerlo;
 1710         
 1711         vmm_timer_pop(act);                                                             /* Go adjust all of the timer stuff */
 1712         return KERN_SUCCESS;                                                    /* Leave now... */
 1713 }
 1714 
 1715 
 1716 /*-----------------------------------------------------------------------
 1717 ** vmm_get_timer
 1718 **
 1719 ** This function causes the timer for a specified VM to be
 1720 ** returned in return_params[0] and return_params[1].
 1721 ** Note that this is kind of funky for 64-bit VMs because we
 1722 ** split the timer into two parts so that we still set parms 0 and 1.
 1723 ** Obviously, we don't need to do this because the parms are 8 bytes
 1724 ** wide.
 1725 ** 
 1726 **
 1727 ** Inputs:
 1728 **              act - pointer to current thread activation structure
 1729 **              index - index returned by vmm_init_context
 1730 **
 1731 ** Outputs:
 1732 **              Timer value set in return_params[0] and return_params[1].
 1733 **              Set to 0 if timer is not set.
 1734 -----------------------------------------------------------------------*/
 1735 
 1736 kern_return_t vmm_get_timer(
 1737         thread_t                        act,
 1738         vmm_thread_index_t      index)
 1739 {
 1740         vmmCntrlEntry           *CEntry;
 1741         vmmCntrlTable           *CTable;
 1742 
 1743         CEntry = vmm_get_entry(act, index);                             /* Convert index to entry */            
 1744         if (CEntry == NULL) return KERN_FAILURE;                /* Either this isn't vmm thread or the index is bogus */
 1745 
 1746         if(CEntry->vmmXAFlgs & vmm64Bit) {                              /* A 64-bit virtual machine? */
 1747                 CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[0] = (uint32_t)(CEntry->vmmTimer >> 32);   /* Return the last timer value */
 1748                 CEntry->vmmContextKern->vmmRet.vmmrp64.return_params[1] = (uint32_t)CEntry->vmmTimer;   /* Return the last timer value */
 1749         }
 1750         else {
 1751                 CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[0] = (CEntry->vmmTimer >> 32);     /* Return the last timer value */
 1752                 CEntry->vmmContextKern->vmmRet.vmmrp32.return_params[1] = (uint32_t)CEntry->vmmTimer;   /* Return the last timer value */
 1753         }
 1754         return KERN_SUCCESS;
 1755 }
 1756 
 1757 
 1758 /*-----------------------------------------------------------------------
 1759 ** vmm_timer_pop
 1760 **
 1761 ** This function causes all timers in the array of VMs to be updated.
 1762 ** All appropriate flags are set or reset.  If a VM is currently
 1763 ** running and its timer expired, it is intercepted.
 1764 **
 1765 ** The qactTimer value is set to the lowest unexpired timer.  It is
 1766 ** zeroed if all timers are expired or have been reset.
 1767 **
 1768 ** Inputs:
 1769 **              act - pointer to current thread activation structure
 1770 **
 1771 ** Outputs:
 1772 **              timers set, vmmTimerPop cleared or set
 1773 -----------------------------------------------------------------------*/
 1774 
 1775 void vmm_timer_pop(
 1776         thread_t                        act)
 1777 {
 1778         vmmCntrlEntry           *CEntry;
 1779         vmmCntrlTable           *CTable;
 1780         int                                     cvi, any;
 1781         uint64_t                        now, soonest;
 1782         savearea                        *sv;
 1783                 
 1784         if(!((unsigned int)act->machine.vmmControl & 0xFFFFFFFE)) {     /* Are there any virtual machines? */
 1785                 panic("vmm_timer_pop: No virtual machines defined; act = %08X\n", act);
 1786         }
 1787 
 1788         soonest = 0xFFFFFFFFFFFFFFFFULL;                                /* Max time */
 1789 
 1790         clock_get_uptime(&now);                                                 /* What time is it? */
 1791         
 1792         CTable = act->machine.vmmControl;                                       /* Make this easier */  
 1793         any = 0;                                                                                /* Haven't found a running unexpired timer yet */
 1794         
 1795         for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {    /* Cycle through all and check time now */
 1796 
 1797                 if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) continue;  /* Do not check if the entry is empty */
 1798                 
 1799                 if(CTable->vmmc[cvi].vmmTimer == 0) {           /* Is the timer reset? */
 1800                         CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;                     /* Clear timer popped */
 1801                         CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;      /* Clear timer popped */
 1802                         continue;                                                               /* Check next */
 1803                 }
 1804 
 1805                 if (CTable->vmmc[cvi].vmmTimer <= now) {
 1806                         CTable->vmmc[cvi].vmmFlags |= vmmTimerPop;      /* Set timer popped here */
 1807                         CTable->vmmc[cvi].vmmContextKern->vmmStat |= vmmTimerPop;       /* Set timer popped here */
 1808                         if((unsigned int)&CTable->vmmc[cvi] == (unsigned int)act->machine.vmmCEntry) {  /* Is this the running VM? */
 1809                                 sv = find_user_regs(act);                       /* Get the user state registers */
 1810                                 if(!sv) {                                                       /* Did we find something? */
 1811                                         panic("vmm_timer_pop: no user context; act = %08X\n", act);
 1812                                 }
 1813                                 sv->save_exception = kVmmReturnNull*4;  /* Indicate that this is a null exception */
 1814                                 vmm_force_exit(act, sv);                        /* Intercept a running VM */
 1815                         }
 1816                         continue;                                                               /* Check the rest */
 1817                 }
 1818                 else {                                                                          /* It hasn't popped yet */
 1819                         CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;     /* Set timer not popped here */
 1820                         CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;      /* Set timer not popped here */
 1821                 }
 1822                 
 1823                 any = 1;                                                                        /* Show we found an active unexpired timer */
 1824                 
 1825                 if (CTable->vmmc[cvi].vmmTimer < soonest)
 1826                         soonest = CTable->vmmc[cvi].vmmTimer;
 1827         }
 1828         
 1829         if(any) {
 1830                 if (act->machine.qactTimer == 0 || soonest <= act->machine.qactTimer)
 1831                         act->machine.qactTimer = soonest;       /* Set lowest timer */
 1832         }
 1833 
 1834         return;
 1835 }
 1836 
 1837 
 1838 
 1839 /*-----------------------------------------------------------------------
 1840 ** vmm_stop_vm
 1841 **
 1842 ** This function prevents the specified VM(s) from running.
 1843 ** If any is currently executing, the execution is intercepted
 1844 ** with a code of kVmmStopped.  Note that execution of the VM is
 1845 ** blocked until a vmmExecuteVM is called with the start flag set to 1.
 1846 ** This provides the ability for a thread to stop execution of a VM and
 1847 ** insure that it will not be run until the emulator has processed the
 1848 ** "virtual" interruption.
 1849 **
 1850 ** Inputs:
 1851 **              vmmask - 32 bit mask corresponding to the VMs to put in stop state
 1852 **                               NOTE: if this mask is all 0s, any executing VM is intercepted with
 1853 **                               a kVmmStopped (but not marked stopped); otherwise this is a no-op. Also
 1854 **                               note that there is a potential race here and the VM may not stop.
 1855 **
 1856 ** Outputs:
 1857 **              kernel return code indicating success
 1858 **      or if no VMs are enabled, an invalid syscall exception.
 1859 -----------------------------------------------------------------------*/
 1860 
int vmm_stop_vm(struct savearea *save)
{

	thread_t			act;
	vmmCntrlTable		*CTable;
	int					cvi, i;
	task_t				task;
	thread_t			fact;
	unsigned int		vmmask;
	ReturnHandler		*stopapc;

	/* Return value convention: 0 means "no VMs enabled" and the caller turns
	   that into an invalid-syscall exception; 1 means the call was handled and
	   save->save_r3 carries the kernel return code. */

	ml_set_interrupts_enabled(TRUE);			/* This can take a bit of time so pass interruptions */
	
	task = current_task();						/* Figure out who we are */

	task_lock(task);							/* Lock our task */

	fact = (thread_t)task->threads.next;	/* Get the first activation on task */
	act = 0;									/* Pretend we didn't find it yet */

	/* Scan the task's thread list for the (single) thread that owns the vmm
	   control table */
	for(i = 0; i < task->thread_count; i++) {	/* All of the activations */
		if(fact->machine.vmmControl) {			/* Is this a virtual machine monitor? */
			act = fact;							/* Yeah... */
			break;								/* Bail the loop... */
		}
		fact = (thread_t)fact->task_threads.next;	/* Go to the next one */
	}

	if(!((unsigned int)act)) {					/* See if we have VMMs yet */
		task_unlock(task);						/* No, unlock the task */
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		return 0;								/* Go generate a syscall exception */
	}

	thread_reference(act);						/* Hold the thread while we work on it */

	task_unlock(task);							/* Safe to release now */

	thread_mtx_lock(act);

	CTable = act->machine.vmmControl;			/* Get the pointer to the table */
	
	/* Low bit of the pointer may be used as a flag, hence the & -2 mask */
	if(!((unsigned int)CTable & -2)) {			/* Are there any all the way up yet? */
		thread_mtx_unlock(act);					/* Unlock the activation */
		thread_deallocate(act);
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		return 0;								/* Go generate a syscall exception */
	}
	
	if(!(vmmask = save->save_r3)) {				/* Get the stop mask and check if all zeros */
		thread_mtx_unlock(act);					/* Unlock the activation */
		thread_deallocate(act);
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;			/* Set success */	
		return 1;								/* Return... */
	}

	/* The mask is consumed MSB-first: bit 0x80000000 corresponds to context
	   slot 0 */
	for(cvi = 0; cvi < kVmmMaxContexts; cvi++) {	/* Search slots */
		if((0x80000000 & vmmask) && (CTable->vmmc[cvi].vmmFlags & vmmInUse)) {	/* See if we need to stop and if it is in use */
			hw_atomic_or(&CTable->vmmc[cvi].vmmFlags, vmmXStop);	/* Set this one to stop */
		}
		vmmask = vmmask << 1;					/* Slide mask over */
	}
	
	/* emPendRupts is a 0/1 gate: only one stop APC may be outstanding */
	if(hw_compare_and_store(0, 1, &act->machine.emPendRupts)) {	/* See if there is already a stop pending and lock out others if not */
		thread_mtx_unlock(act);					/* Already one pending, unlock the activation */
		thread_deallocate(act);
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_SUCCESS;			/* Say we did it... */	
		return 1;								/* Leave */
	}

	if(!(stopapc = (ReturnHandler *)kalloc(sizeof(ReturnHandler)))) {	/* Get a return handler control block */
		act->machine.emPendRupts = 0;			/* No memory, say we have given up request */
		thread_mtx_unlock(act);					/* Unlock the activation */
		thread_deallocate(act);
		ml_set_interrupts_enabled(FALSE);		/* Set back interruptions */
		save->save_r3 = KERN_RESOURCE_SHORTAGE;	/* No storage... */
		return 1;								/* Return... */
	}

	ml_set_interrupts_enabled(FALSE);			/* Disable interruptions for now */

	/* Queue vmm_interrupt to run as an APC on the target thread; the handler
	   frees the control block itself */
	stopapc->handler = vmm_interrupt;			/* Set interruption routine */

	stopapc->next = act->handlers;				/* Put our interrupt at the start of the list */
	act->handlers = stopapc;					/* Point to us */

	act_set_apc(act);							/* Set an APC AST */
	ml_set_interrupts_enabled(TRUE);			/* Enable interruptions now */

	thread_mtx_unlock(act);						/* Unlock the activation */
	thread_deallocate(act);
	
	ml_set_interrupts_enabled(FALSE);			/* Set back interruptions */
	save->save_r3 = KERN_SUCCESS;				/* Hip, hip, horay... */	
	return 1;
}
 1959 
 1960 /*-----------------------------------------------------------------------
 1961 ** vmm_interrupt
 1962 **
 1963 ** This function is executed asynchronously from an APC AST.
 1964 ** It is to be used for anything that needs to interrupt a running VM.
 1965 ** This include any kind of interruption generation (other than timer pop)
 1966 ** or entering the stopped state.
 1967 **
 1968 ** Inputs:
 1969 **              ReturnHandler *rh - the return handler control block as required by the APC.
 1970 **              thread_t act  - the activation
 1971 **
 1972 ** Outputs:
 1973 **              Whatever needed to be done is done.
 1974 -----------------------------------------------------------------------*/
 1975 
 1976 void vmm_interrupt(ReturnHandler *rh, thread_t act) {
 1977 
 1978         vmmCntrlTable           *CTable;
 1979         savearea                        *sv;
 1980         boolean_t                       inter;
 1981 
 1982 
 1983 
 1984         kfree(rh, sizeof(ReturnHandler));       /* Release the return handler block */
 1985         
 1986         inter  = ml_set_interrupts_enabled(FALSE);      /* Disable interruptions for now */
 1987 
 1988         act->machine.emPendRupts = 0;                                   /* Say that there are no more interrupts pending */
 1989         CTable = act->machine.vmmControl;                               /* Get the pointer to the table */
 1990         
 1991         if(!((unsigned int)CTable & -2)) return;        /* Leave if we aren't doing VMs any more... */
 1992 
 1993         if(act->machine.vmmCEntry && (act->machine.vmmCEntry->vmmFlags & vmmXStop)) {   /* Do we need to stop the running guy? */
 1994                 sv = find_user_regs(act);                               /* Get the user state registers */
 1995                 if(!sv) {                                                               /* Did we find something? */
 1996                         panic("vmm_interrupt: no user context; act = %08X\n", act);
 1997                 }
 1998                 sv->save_exception = kVmmStopped*4;             /* Set a "stopped" exception */
 1999                 vmm_force_exit(act, sv);                                /* Intercept a running VM */
 2000         }
 2001         ml_set_interrupts_enabled(inter);                       /* Put interrupts back to what they were */
 2002 
 2003         return;
 2004 }

Cache object: 810b403ac2525696a16cb146e63ed521


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.