FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/vm86.c

/*-
 * Copyright (c) 1997 Jonathan Lemon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_vm86.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>

#include <sys/user.h>

#include <machine/md_var.h>
#include <machine/pcb_ext.h>    /* pcb.h included via sys/user.h */
#include <machine/psl.h>
#include <machine/specialreg.h>

extern int i386_extend_pcb(struct proc *);
extern struct segment_descriptor common_tssd;
extern int vm86paddr, vm86pa;
extern struct pcb *vm86pcb;

extern int vm86_bioscall(struct vm86frame *);
extern void vm86_biosret(struct vm86frame *);

void vm86_prepcall(struct vm86frame);

#define HLT     0xf4
#define CLI     0xfa
#define STI     0xfb
#define PUSHF   0x9c
#define POPF    0x9d
#define INTn    0xcd
#define IRET    0xcf
#define CALLm   0xff
#define OPERAND_SIZE_PREFIX     0x66
#define ADDRESS_SIZE_PREFIX     0x67
#define PUSH_MASK       ~(PSL_VM | PSL_RF | PSL_I)
#define POP_MASK        ~(PSL_VIP | PSL_VIF | PSL_VM | PSL_RF | PSL_IOPL)

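/*
 * PUSH_MASK strips PSL_VM, PSL_RF and PSL_I from flag images pushed
 * on the vm86 stack; POP_MASK keeps a vm86 program from setting VIP,
 * VIF, VM, RF or IOPL when flags are popped back.
 *
 * Real-mode addresses are formed as (segment << 4) + offset, so for
 * example MAKE_ADDR(0xf000, 0xfff0) is linear address 0xffff0.  An
 * interrupt vector packs a 16-bit segment and offset into a single
 * 32-bit value, segment in the high half.
 */
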
static __inline caddr_t
MAKE_ADDR(u_short sel, u_short off)
{
        return ((caddr_t)((sel << 4) + off));
}

static __inline void
GET_VEC(u_int vec, u_short *sel, u_short *off)
{
        *sel = vec >> 16;
        *off = vec & 0xffff;
}

static __inline u_int
MAKE_VEC(u_short sel, u_short off)
{
        return ((sel << 16) | off);
}

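/*
 * The stack helpers below go through the fu*()/su*() user-memory
 * accessors rather than plain loads and stores, since the vm86 stack
 * lives in user-mapped memory and the access may fault.
 */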
static __inline void
PUSH(u_short x, struct vm86frame *vmf)
{
        vmf->vmf_sp -= 2;
        susword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline void
PUSHL(u_int x, struct vm86frame *vmf)
{
        vmf->vmf_sp -= 4;
        suword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline u_short
POP(struct vm86frame *vmf)
{
        u_short x = fusword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

        vmf->vmf_sp += 2;
        return (x);
}

static __inline u_int
POPL(struct vm86frame *vmf)
{
        u_int x = fuword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

        vmf->vmf_sp += 4;
        return (x);
}

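/*
 * Emulate the privileged instruction that trapped while the process
 * was in vm86 mode.  Returns 0 when the instruction was handled
 * (SIGTRAP instead if single-stepping), SIGBUS when it could not be
 * handled, or SIGKILL on a fatal condition.
 */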
int
vm86_emulate(struct vm86frame *vmf)
{
        struct vm86_kernel *vm86;
        caddr_t addr;
        u_char i_byte;
        u_int temp_flags;
        int inc_ip = 1;
        int retcode = 0;

        /*
         * pcb_ext contains the address of the extension area, or zero if
         * the extension is not present.  (This check should not be needed,
         * as we can't enter vm86 mode until we set up an extension area)
         */
        if (curpcb->pcb_ext == 0)
                return (SIGBUS);
        vm86 = &curpcb->pcb_ext->ext_vm86;

        if (vmf->vmf_eflags & PSL_T)
                retcode = SIGTRAP;

        addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
        i_byte = fubyte(addr);
        if (i_byte == ADDRESS_SIZE_PREFIX) {
                i_byte = fubyte(++addr);
                inc_ip++;
        }

        if (vm86->vm86_has_vme) {
                switch (i_byte) {
                case OPERAND_SIZE_PREFIX:
                        i_byte = fubyte(++addr);
                        inc_ip++;
                        switch (i_byte) {
                        case PUSHF:
                                if (vmf->vmf_eflags & PSL_VIF)
                                        PUSHL((vmf->vmf_eflags & PUSH_MASK)
                                            | PSL_IOPL | PSL_I, vmf);
                                else
                                        PUSHL((vmf->vmf_eflags & PUSH_MASK)
                                            | PSL_IOPL, vmf);
                                vmf->vmf_ip += inc_ip;
                                return (retcode);

                        case POPF:
                                temp_flags = POPL(vmf) & POP_MASK;
                                vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
                                    | temp_flags | PSL_VM | PSL_I;
                                vmf->vmf_ip += inc_ip;
                                if (temp_flags & PSL_I) {
                                        vmf->vmf_eflags |= PSL_VIF;
                                        if (vmf->vmf_eflags & PSL_VIP)
                                                break;
                                } else {
                                        vmf->vmf_eflags &= ~PSL_VIF;
                                }
                                return (retcode);
                        }
                        break;

                /* VME faults here if VIP is set, but does not set VIF. */
                case STI:
                        vmf->vmf_eflags |= PSL_VIF;
                        vmf->vmf_ip += inc_ip;
                        if ((vmf->vmf_eflags & PSL_VIP) == 0) {
                                uprintf("fatal sti\n");
                                return (SIGKILL);
                        }
                        break;

                /* VME if no redirection support */
                case INTn:
                        break;

                /* VME if trying to set PSL_TF, or PSL_I when VIP is set */
                case POPF:
                        temp_flags = POP(vmf) & POP_MASK;
                        vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        vmf->vmf_ip += inc_ip;
                        if (temp_flags & PSL_I) {
                                vmf->vmf_eflags |= PSL_VIF;
                                if (vmf->vmf_eflags & PSL_VIP)
                                        break;
                        } else {
                                vmf->vmf_eflags &= ~PSL_VIF;
                        }
                        return (retcode);

                /* VME if trying to set PSL_TF, or PSL_I when VIP is set */
                case IRET:
                        vmf->vmf_ip = POP(vmf);
                        vmf->vmf_cs = POP(vmf);
                        temp_flags = POP(vmf) & POP_MASK;
                        vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        if (temp_flags & PSL_I) {
                                vmf->vmf_eflags |= PSL_VIF;
                                if (vmf->vmf_eflags & PSL_VIP)
                                        break;
                        } else {
                                vmf->vmf_eflags &= ~PSL_VIF;
                        }
                        return (retcode);

                }
                return (SIGBUS);
        }

        switch (i_byte) {
        case OPERAND_SIZE_PREFIX:
                i_byte = fubyte(++addr);
                inc_ip++;
                switch (i_byte) {
                case PUSHF:
                        if (vm86->vm86_eflags & PSL_VIF)
                                PUSHL((vmf->vmf_flags & PUSH_MASK)
                                    | PSL_IOPL | PSL_I, vmf);
                        else
                                PUSHL((vmf->vmf_flags & PUSH_MASK)
                                    | PSL_IOPL, vmf);
                        vmf->vmf_ip += inc_ip;
                        return (retcode);

                case POPF:
                        temp_flags = POPL(vmf) & POP_MASK;
                        vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        vmf->vmf_ip += inc_ip;
                        if (temp_flags & PSL_I) {
                                vm86->vm86_eflags |= PSL_VIF;
                                if (vm86->vm86_eflags & PSL_VIP)
                                        break;
                        } else {
                                vm86->vm86_eflags &= ~PSL_VIF;
                        }
                        return (retcode);
                }
                return (SIGBUS);

        case CLI:
                vm86->vm86_eflags &= ~PSL_VIF;
                vmf->vmf_ip += inc_ip;
                return (retcode);

        case STI:
                /* if there is a pending interrupt, go to the emulator */
                vm86->vm86_eflags |= PSL_VIF;
                vmf->vmf_ip += inc_ip;
                if (vm86->vm86_eflags & PSL_VIP)
                        break;
                return (retcode);

        case PUSHF:
                if (vm86->vm86_eflags & PSL_VIF)
                        PUSH((vmf->vmf_flags & PUSH_MASK)
                            | PSL_IOPL | PSL_I, vmf);
                else
                        PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
                vmf->vmf_ip += inc_ip;
                return (retcode);

        case INTn:
                i_byte = fubyte(addr + 1);
                if ((vm86->vm86_intmap[i_byte >> 3] & (1 << (i_byte & 7))) != 0)
                        break;
                if (vm86->vm86_eflags & PSL_VIF)
                        PUSH((vmf->vmf_flags & PUSH_MASK)
                            | PSL_IOPL | PSL_I, vmf);
                else
                        PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
                PUSH(vmf->vmf_cs, vmf);
                PUSH(vmf->vmf_ip + inc_ip + 1, vmf);    /* return address skips the vector byte */
                GET_VEC(fuword((caddr_t)(i_byte * 4)),
                     &vmf->vmf_cs, &vmf->vmf_ip);
                vmf->vmf_flags &= ~PSL_T;
                vm86->vm86_eflags &= ~PSL_VIF;
                return (retcode);

        case IRET:
                vmf->vmf_ip = POP(vmf);
                vmf->vmf_cs = POP(vmf);
                temp_flags = POP(vmf) & POP_MASK;
                vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                    | temp_flags | PSL_VM | PSL_I;
                if (temp_flags & PSL_I) {
                        vm86->vm86_eflags |= PSL_VIF;
                        if (vm86->vm86_eflags & PSL_VIP)
                                break;
                } else {
                        vm86->vm86_eflags &= ~PSL_VIF;
                }
                return (retcode);

        case POPF:
                temp_flags = POP(vmf) & POP_MASK;
                vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                    | temp_flags | PSL_VM | PSL_I;
                vmf->vmf_ip += inc_ip;
                if (temp_flags & PSL_I) {
                        vm86->vm86_eflags |= PSL_VIF;
                        if (vm86->vm86_eflags & PSL_VIP)
                                break;
                } else {
                        vm86->vm86_eflags &= ~PSL_VIF;
                }
                return (retcode);
        }
        return (SIGBUS);
}

#define PGTABLE_SIZE    ((1024 + 64) * 1024 / PAGE_SIZE)
#define INTMAP_SIZE     32
#define IOMAP_SIZE      ctob(IOPAGES)
#define TSS_SIZE \
        (sizeof(struct pcb_ext) - sizeof(struct segment_descriptor) + \
         INTMAP_SIZE + IOMAP_SIZE + 1)
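
/*
 * The trailing "+ 1" in TSS_SIZE accounts for the 0xff terminator
 * byte the CPU requires after the I/O permission bitmap; it is
 * stored in vml_iomap_trailer below.
 */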

struct vm86_layout {
        pt_entry_t      vml_pgtbl[PGTABLE_SIZE];
        struct  pcb vml_pcb;
        struct  pcb_ext vml_ext;
        char    vml_intmap[INTMAP_SIZE];
        char    vml_iomap[IOMAP_SIZE];
        char    vml_iomap_trailer;
};

static void
vm86_initialize(void)
{
        int i;
        u_int *addr;
        struct vm86_layout *vml = (struct vm86_layout *)vm86paddr;
        struct pcb *pcb;
        struct pcb_ext *ext;
        struct soft_segment_descriptor ssd = {
                0,                      /* segment base address (overwritten) */
                0,                      /* length (overwritten) */
                SDT_SYS386TSS,          /* segment type */
                0,                      /* priority level */
                1,                      /* descriptor present */
                0, 0,
                0,                      /* default 32 vs 16 bit size */
                0                       /* granularity */
        };

        /*
         * this should be a compile time error, but cpp doesn't grok sizeof().
         */
        if (sizeof(struct vm86_layout) > ctob(3))
                panic("struct vm86_layout exceeds space allocated in locore.s");

        /*
         * Below is the memory layout that we use for the vm86 region.
         *
         * +--------+
         * |        |
         * |        |
         * | page 0 |
         * |        | +--------+
         * |        | | stack  |
         * +--------+ +--------+ <--------- vm86paddr
         * |        | |Page Tbl| 1M + 64K = 272 entries = 1088 bytes
         * |        | +--------+
         * |        | |  PCB   | size: ~240 bytes
         * | page 1 | |PCB Ext | size: ~140 bytes (includes TSS)
         * |        | +--------+
         * |        | |int map |
         * |        | +--------+
         * +--------+ |        |
         * | page 2 | |  I/O   |
         * +--------+ | bitmap |
         * | page 3 | |        |
         * |        | +--------+
         * +--------+
         */

        /*
         * A rudimentary PCB must be installed, in order to get to the
         * PCB extension area.  We use the PCB area as a scratchpad for
         * data storage, the layout of which is shown below.
         *
         * pcb_esi      = new PTD entry 0
         * pcb_ebp      = pointer to frame on vm86 stack
         * pcb_esp      =    stack frame pointer at time of switch
         * pcb_ebx      = va of vm86 page table
         * pcb_eip      =    argument pointer to initial call
         * pcb_fs       =    saved TSS descriptor, word 0
         * pcb_gs       =    saved TSS descriptor, word 1
         */
#define new_ptd         pcb_esi
#define vm86_frame      pcb_ebp
#define pgtable_va      pcb_ebx

        pcb = &vml->vml_pcb;
        ext = &vml->vml_ext;

        bzero(pcb, sizeof(struct pcb));
        pcb->new_ptd = vm86pa | PG_V | PG_RW | PG_U;
        pcb->vm86_frame = vm86paddr - sizeof(struct vm86frame);
        pcb->pgtable_va = vm86paddr;
        pcb->pcb_ext = ext;

        bzero(ext, sizeof(struct pcb_ext));
        ext->ext_tss.tss_esp0 = vm86paddr;
        ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
        ext->ext_tss.tss_ioopt =
                ((u_int)vml->vml_iomap - (u_int)&ext->ext_tss) << 16;
        ext->ext_iomap = vml->vml_iomap;
        ext->ext_vm86.vm86_intmap = vml->vml_intmap;

        if (cpu_feature & CPUID_VME)
                ext->ext_vm86.vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);

        addr = (u_int *)ext->ext_vm86.vm86_intmap;
        for (i = 0; i < (INTMAP_SIZE + IOMAP_SIZE) / sizeof(u_int); i++)
                *addr++ = 0;
        vml->vml_iomap_trailer = 0xff;

        ssd.ssd_base = (u_int)&ext->ext_tss;
        ssd.ssd_limit = TSS_SIZE - 1;
        ssdtosd(&ssd, &ext->ext_tssd);

        vm86pcb = pcb;
}

vm_offset_t
vm86_getpage(struct vm86context *vmc, int pagenum)
{
        int i;

        for (i = 0; i < vmc->npages; i++)
                if (vmc->pmap[i].pte_num == pagenum)
                        return (vmc->pmap[i].kva);
        return (0);
}

vm_offset_t
vm86_addpage(struct vm86context *vmc, int pagenum, vm_offset_t kva)
{
        int i, flags = 0;

        for (i = 0; i < vmc->npages; i++)
                if (vmc->pmap[i].pte_num == pagenum)
                        goto bad;

        if (vmc->npages == VM86_PMAPSIZE)
                goto bad;                       /* XXX grow map? */

        if (kva == 0) {
                kva = (vm_offset_t)malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
                flags = VMAP_MALLOC;
        }

        i = vmc->npages++;
        vmc->pmap[i].flags = flags;
        vmc->pmap[i].kva = kva;
        vmc->pmap[i].pte_num = pagenum;
        return (kva);
bad:
        panic("vm86_addpage: not enough room, or overlap");
}

void
initial_bioscalls(u_int *basemem, u_int *extmem)
{
        int i, method;
        struct vm86frame vmf;
        struct vm86context vmc;
        u_int64_t highwat = 0;
        pt_entry_t pte;
        struct {
                u_int64_t base;
                u_int64_t length;
                u_int32_t type;
        } *smap;

        bzero(&vmf, sizeof(struct vm86frame));          /* safety */
        vm86_initialize();

#ifndef PC98
        vm86_intcall(0x12, &vmf);
        *basemem = vmf.vmf_ax;
        *extmem = 0;

        /*
         * if basemem != 640, map pages r/w into vm86 page table so
         * that the bios can scribble on it.
         */
        pte = (pt_entry_t)vm86paddr;
        for (i = *basemem / 4; i < 160; i++)
                pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;

        /*
         * map page 1 R/W into the kernel page table so we can use it
         * as a buffer.  The kernel will unmap this page later.
         */
        pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT));
        *pte = (1 << PAGE_SHIFT) | PG_RW | PG_V;

        /*
         * get memory map with INT 15:E820
         */
#define SMAPSIZ         sizeof(*smap)
#define SMAP_SIG        0x534D4150                      /* 'SMAP' */
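
        /*
         * Each E820 call returns a single map entry; EBX carries the
         * BIOS continuation value and reads back as zero once the map
         * is exhausted.  Only type-1 (usable RAM) entries above the
         * previous high-water mark are counted as extended memory.
         */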

        vmc.npages = 0;
        smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
        vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);

        vmf.vmf_ebx = 0;
        do {
                vmf.vmf_eax = 0xE820;
                vmf.vmf_edx = SMAP_SIG;
                vmf.vmf_ecx = SMAPSIZ;
                i = vm86_datacall(0x15, &vmf, &vmc);
                if (i || vmf.vmf_eax != SMAP_SIG)
                        break;
                if (smap->type == 0x01 && smap->base >= highwat) {
                        *extmem += (smap->length / 1024);
                        highwat = smap->base + smap->length;
                }
        } while (vmf.vmf_ebx != 0);

        if (*extmem != 0) {
                if (*extmem > *basemem) {
                        *extmem -= *basemem;
                        method = 0xE820;
                        goto done;
                }
                printf("E820: extmem (%d) < basemem (%d)\n", *extmem, *basemem);
        }

        /*
         * try memory map with INT 15:E801
         */
        vmf.vmf_ax = 0xE801;
        if (vm86_intcall(0x15, &vmf) == 0) {
                *extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
                method = 0xE801;
                goto done;
        }

        vmf.vmf_ah = 0x88;
        vm86_intcall(0x15, &vmf);
        *extmem = vmf.vmf_ax;
        method = 0x88;

done:
        printf("BIOS basemem: %dK, extmem: %dK (from %#x call)\n",
            *basemem, *extmem, method);
#endif /* !PC98 */
}

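/*
 * With hardware VME, VIF/VIP live in the real eflags image and only
 * the VME_USERCHANGE bits may be set by the caller; without it, the
 * virtual interrupt state is tracked in vm86_eflags by the emulator
 * above and only the VM_USERCHANGE bits pass through.
 */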
static void
vm86_initflags(struct vm86frame *vmf)
{
        int eflags = vmf->vmf_eflags;
        struct vm86_kernel *vm86 = &curpcb->pcb_ext->ext_vm86;

        if (vm86->vm86_has_vme) {
                eflags = (vmf->vmf_eflags & ~VME_USERCHANGE) |
                    (eflags & VME_USERCHANGE) | PSL_VM;
        } else {
                vm86->vm86_eflags = eflags;     /* save VIF, VIP */
                eflags = (vmf->vmf_eflags & ~VM_USERCHANGE) |
                    (eflags & VM_USERCHANGE) | PSL_VM;
        }
        vmf->vmf_eflags = eflags | PSL_VM;
}

/*
 * called from vm86_bioscall, while in vm86 address space, to finalize setup.
 */
void
vm86_prepcall(struct vm86frame vmf)
{
        uintptr_t addr[] = { 0xA00, 0x1000 };   /* code, stack */
        u_char intcall[] = {
                CLI, INTn, 0x00, STI, HLT
        };

        if ((vmf.vmf_trapno & PAGE_MASK) <= 0xff) {
                /* interrupt call requested */
                intcall[2] = (u_char)(vmf.vmf_trapno & 0xff);
                memcpy((void *)addr[0], (void *)intcall, sizeof(intcall));
                vmf.vmf_ip = addr[0];
                vmf.vmf_cs = 0;
        }
        vmf.vmf_sp = addr[1] - 2;               /* keep aligned */
        vmf.kernel_es = vmf.kernel_ds = 0;
        vmf.vmf_ss = 0;
        vmf.vmf_eflags = PSL_VIF | PSL_VM | PSL_USER;
        vm86_initflags(&vmf);
}

/*
 * vm86 trap handler; determines whether routine succeeded or not.
 * Called while in vm86 space, returns to calling process.
 */
void
vm86_trap(struct vm86frame *vmf)
{
        caddr_t addr;

        /* "should not happen" */
        if ((vmf->vmf_eflags & PSL_VM) == 0)
                panic("vm86_trap called, but not in vm86 mode");

        addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
        if (*(u_char *)addr == HLT)
                vmf->vmf_trapno = vmf->vmf_eflags & PSL_C;
        else
                vmf->vmf_trapno = vmf->vmf_trapno << 16;

        vm86_biosret(vmf);
}

int
vm86_intcall(int intnum, struct vm86frame *vmf)
{
        if (intnum < 0 || intnum > 0xff)
                return (EINVAL);

        vmf->vmf_trapno = intnum;
        return (vm86_bioscall(vmf));
}
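
/*
 * Example (a minimal sketch): fetch the BIOS equipment word via
 * INT 11h, which returns its result in AX; a zero return means the
 * trampoline HLTed with the carry flag clear.
 *
 *      struct vm86frame vmf;
 *
 *      bzero(&vmf, sizeof(vmf));
 *      if (vm86_intcall(0x11, &vmf) == 0)
 *              printf("equipment flags: %#x\n", vmf.vmf_ax);
 */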

/*
 * struct vm86context contains the page table to use when making
 * vm86 calls.  If intnum is a valid interrupt number (0-255), then
 * the "interrupt trampoline" will be used, otherwise we use the
 * caller's cs:ip routine.
 */
int
vm86_datacall(int intnum, struct vm86frame *vmf, struct vm86context *vmc)
{
        pt_entry_t pte = (pt_entry_t)vm86paddr;
        u_int page;
        int i, entry, retval;

        for (i = 0; i < vmc->npages; i++) {
                page = vtophys(vmc->pmap[i].kva & PG_FRAME);
                entry = vmc->pmap[i].pte_num;
                vmc->pmap[i].old_pte = pte[entry];
                pte[entry] = page | PG_V | PG_RW | PG_U;
        }

        vmf->vmf_trapno = intnum;
        retval = vm86_bioscall(vmf);

        for (i = 0; i < vmc->npages; i++) {
                entry = vmc->pmap[i].pte_num;
                pte[entry] = vmc->pmap[i].old_pte;
        }

        return (retval);
}

vm_offset_t
vm86_getaddr(struct vm86context *vmc, u_short sel, u_short off)
{
        int i, page;
        vm_offset_t addr;

        addr = (vm_offset_t)MAKE_ADDR(sel, off);
        page = addr >> PAGE_SHIFT;
        for (i = 0; i < vmc->npages; i++)
                if (page == vmc->pmap[i].pte_num)
                        return (vmc->pmap[i].kva + (addr & PAGE_MASK));
        return (0);
}

int
vm86_getptr(struct vm86context *vmc, vm_offset_t kva, u_short *sel,
    u_short *off)
{
        int i;

        for (i = 0; i < vmc->npages; i++)
                if (kva >= vmc->pmap[i].kva &&
                    kva < vmc->pmap[i].kva + PAGE_SIZE) {
                        *off = kva - vmc->pmap[i].kva;
                        *sel = vmc->pmap[i].pte_num << 8;
                        return (1);
                }
        return (0);
}
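/*
 * vm86_sysarch() backs the i386_vm86() library interface.  A minimal
 * userland sketch, assuming the i386_vm86() wrapper and the
 * struct vm86_init_args layout from <machine/sysarch.h> and
 * <machine/vm86.h>:
 *
 *      struct vm86_init_args va;
 *
 *      bzero(&va, sizeof(va));         (no debug; int_map bits clear,
 *                                       so INTn is vectored through
 *                                       the real-mode vector table)
 *      if (i386_vm86(VM86_INIT, &va) != 0)
 *              err(1, "VM86_INIT");
 */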
int
vm86_sysarch(struct proc *p, char *args)
{
        int error = 0;
        struct i386_vm86_args ua;
        struct vm86_kernel *vm86;

        if ((error = copyin(args, &ua, sizeof(struct i386_vm86_args))) != 0)
                return (error);

        if (p->p_addr->u_pcb.pcb_ext == 0)
                if ((error = i386_extend_pcb(p)) != 0)
                        return (error);
        vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;

        switch (ua.sub_op) {
        case VM86_INIT: {
                struct vm86_init_args sa;

                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
                        return (error);
                if (cpu_feature & CPUID_VME)
                        vm86->vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);
                else
                        vm86->vm86_has_vme = 0;
                vm86->vm86_inited = 1;
                vm86->vm86_debug = sa.debug;
                bcopy(&sa.int_map, vm86->vm86_intmap, 32);
                }
                break;

#if 0
        case VM86_SET_VME: {
                struct vm86_vme_args sa;

                if ((cpu_feature & CPUID_VME) == 0)
                        return (ENODEV);

                if (error = copyin(ua.sub_args, &sa, sizeof(sa)))
                        return (error);
                if (sa.state)
                        load_cr4(rcr4() | CR4_VME);
                else
                        load_cr4(rcr4() & ~CR4_VME);
                }
                break;
#endif

        case VM86_GET_VME: {
                struct vm86_vme_args sa;

                sa.state = (rcr4() & CR4_VME ? 1 : 0);
                error = copyout(&sa, ua.sub_args, sizeof(sa));
                }
                break;

#if 0
        case VM86_INTCALL: {
                struct vm86_intcall_args sa;

                if (error = copyin(ua.sub_args, &sa, sizeof(sa)))
                        return (error);
                if (error = vm86_intcall(sa.intnum, &sa.vmf))
                        return (error);
                error = copyout(&sa, ua.sub_args, sizeof(sa));
                }
                break;
#endif

        default:
                error = EINVAL;
        }
        return (error);
}
