FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/vm86.c


/*-
 * Copyright (c) 1997 Jonathan Lemon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.0/sys/i386/i386/vm86.c 138129 2004-11-27 06:51:39Z das $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/psl.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>

extern int vm86pa;
extern struct pcb *vm86pcb;

static struct mtx vm86_lock;

extern int vm86_bioscall(struct vm86frame *);
extern void vm86_biosret(struct vm86frame *);

void vm86_prepcall(struct vm86frame);

struct system_map {
        int             type;
        vm_offset_t     start;
        vm_offset_t     end;
};

#define HLT     0xf4
#define CLI     0xfa
#define STI     0xfb
#define PUSHF   0x9c
#define POPF    0x9d
#define INTn    0xcd
#define IRET    0xcf
#define CALLm   0xff
#define OPERAND_SIZE_PREFIX     0x66
#define ADDRESS_SIZE_PREFIX     0x67
#define PUSH_MASK       ~(PSL_VM | PSL_RF | PSL_I)
#define POP_MASK        ~(PSL_VIP | PSL_VIF | PSL_VM | PSL_RF | PSL_IOPL)

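/*
 * Helpers for addressing real-mode memory and the vm86 stack.  A vm86
 * address is (segment << 4) + offset, a 20-bit linear address; e.g.
 * 0xc000:0x0010 names linear address 0xc0010.  The PUSH/POP routines
 * below emulate 16- and 32-bit stack operations on the vm86 stack at
 * ss:sp using fuword/suword accesses.
 */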
static __inline caddr_t
MAKE_ADDR(u_short sel, u_short off)
{
        return ((caddr_t)((sel << 4) + off));
}

static __inline void
GET_VEC(u_int vec, u_short *sel, u_short *off)
{
        *sel = vec >> 16;
        *off = vec & 0xffff;
}

static __inline u_int
MAKE_VEC(u_short sel, u_short off)
{
        return ((sel << 16) | off);
}

static __inline void
PUSH(u_short x, struct vm86frame *vmf)
{
        vmf->vmf_sp -= 2;
        suword16(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline void
PUSHL(u_int x, struct vm86frame *vmf)
{
        vmf->vmf_sp -= 4;
        suword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline u_short
POP(struct vm86frame *vmf)
{
        u_short x = fuword16(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

        vmf->vmf_sp += 2;
        return (x);
}

static __inline u_int
POPL(struct vm86frame *vmf)
{
        u_int x = fuword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

        vmf->vmf_sp += 4;
        return (x);
}

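/*
 * Emulate the instruction that faulted while the process was running
 * in vm86 mode (CLI/STI, PUSHF/POPF, INTn, IRET, possibly carrying an
 * operand- or address-size prefix).  Virtual interrupt state is kept
 * in the PSL_VIF/PSL_VIP bits; on CPUs with VME enabled the hardware
 * handles most of these cases itself, and only those it cannot handle
 * trap here.  Returns 0 (or SIGTRAP when tracing) on success, and a
 * signal such as SIGBUS when the instruction cannot be emulated or a
 * pending interrupt must be delivered.
 */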
int
vm86_emulate(struct vm86frame *vmf)
{
        struct vm86_kernel *vm86;
        caddr_t addr;
        u_char i_byte;
        u_int temp_flags;
        int inc_ip = 1;
        int retcode = 0;

        /*
         * pcb_ext contains the address of the extension area, or zero if
         * the extension is not present.  (This check should not be needed,
         * as we can't enter vm86 mode until we set up an extension area)
         */
        if (PCPU_GET(curpcb)->pcb_ext == 0)
                return (SIGBUS);
        vm86 = &PCPU_GET(curpcb)->pcb_ext->ext_vm86;

        if (vmf->vmf_eflags & PSL_T)
                retcode = SIGTRAP;

        addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
        i_byte = fubyte(addr);
        if (i_byte == ADDRESS_SIZE_PREFIX) {
                i_byte = fubyte(++addr);
                inc_ip++;
        }

        if (vm86->vm86_has_vme) {
                switch (i_byte) {
                case OPERAND_SIZE_PREFIX:
                        i_byte = fubyte(++addr);
                        inc_ip++;
                        switch (i_byte) {
                        case PUSHF:
                                if (vmf->vmf_eflags & PSL_VIF)
                                        PUSHL((vmf->vmf_eflags & PUSH_MASK)
                                            | PSL_IOPL | PSL_I, vmf);
                                else
                                        PUSHL((vmf->vmf_eflags & PUSH_MASK)
                                            | PSL_IOPL, vmf);
                                vmf->vmf_ip += inc_ip;
                                return (0);

                        case POPF:
                                temp_flags = POPL(vmf) & POP_MASK;
                                vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
                                    | temp_flags | PSL_VM | PSL_I;
                                vmf->vmf_ip += inc_ip;
                                if (temp_flags & PSL_I) {
                                        vmf->vmf_eflags |= PSL_VIF;
                                        if (vmf->vmf_eflags & PSL_VIP)
                                                break;
                                } else {
                                        vmf->vmf_eflags &= ~PSL_VIF;
                                }
                                return (0);
                        }
                        break;

                /* VME faults here if VIP is set, but does not set VIF. */
                case STI:
                        vmf->vmf_eflags |= PSL_VIF;
                        vmf->vmf_ip += inc_ip;
                        if ((vmf->vmf_eflags & PSL_VIP) == 0) {
                                uprintf("fatal sti\n");
                                return (SIGKILL);
                        }
                        break;

                /* VME if no redirection support */
                case INTn:
                        break;

                /* VME if trying to set PSL_TF, or PSL_I when VIP is set */
                case POPF:
                        temp_flags = POP(vmf) & POP_MASK;
                        vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        vmf->vmf_ip += inc_ip;
                        if (temp_flags & PSL_I) {
                                vmf->vmf_eflags |= PSL_VIF;
                                if (vmf->vmf_eflags & PSL_VIP)
                                        break;
                        } else {
                                vmf->vmf_eflags &= ~PSL_VIF;
                        }
                        return (retcode);

                /* VME if trying to set PSL_TF, or PSL_I when VIP is set */
                case IRET:
                        vmf->vmf_ip = POP(vmf);
                        vmf->vmf_cs = POP(vmf);
                        temp_flags = POP(vmf) & POP_MASK;
                        vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        if (temp_flags & PSL_I) {
                                vmf->vmf_eflags |= PSL_VIF;
                                if (vmf->vmf_eflags & PSL_VIP)
                                        break;
                        } else {
                                vmf->vmf_eflags &= ~PSL_VIF;
                        }
                        return (retcode);

                }
                return (SIGBUS);
        }

        switch (i_byte) {
        case OPERAND_SIZE_PREFIX:
                i_byte = fubyte(++addr);
                inc_ip++;
                switch (i_byte) {
                case PUSHF:
                        if (vm86->vm86_eflags & PSL_VIF)
                                PUSHL((vmf->vmf_flags & PUSH_MASK)
                                    | PSL_IOPL | PSL_I, vmf);
                        else
                                PUSHL((vmf->vmf_flags & PUSH_MASK)
                                    | PSL_IOPL, vmf);
                        vmf->vmf_ip += inc_ip;
                        return (retcode);

                case POPF:
                        temp_flags = POPL(vmf) & POP_MASK;
                        vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        vmf->vmf_ip += inc_ip;
                        if (temp_flags & PSL_I) {
                                vm86->vm86_eflags |= PSL_VIF;
                                if (vm86->vm86_eflags & PSL_VIP)
                                        break;
                        } else {
                                vm86->vm86_eflags &= ~PSL_VIF;
                        }
                        return (retcode);
                }
                return (SIGBUS);

        case CLI:
                vm86->vm86_eflags &= ~PSL_VIF;
                vmf->vmf_ip += inc_ip;
                return (retcode);

        case STI:
                /* if there is a pending interrupt, go to the emulator */
                vm86->vm86_eflags |= PSL_VIF;
                vmf->vmf_ip += inc_ip;
                if (vm86->vm86_eflags & PSL_VIP)
                        break;
                return (retcode);

        case PUSHF:
                if (vm86->vm86_eflags & PSL_VIF)
                        PUSH((vmf->vmf_flags & PUSH_MASK)
                            | PSL_IOPL | PSL_I, vmf);
                else
                        PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
                vmf->vmf_ip += inc_ip;
                return (retcode);

        case INTn:
                i_byte = fubyte(addr + 1);
                if ((vm86->vm86_intmap[i_byte >> 3] & (1 << (i_byte & 7))) != 0)
                        break;
                if (vm86->vm86_eflags & PSL_VIF)
                        PUSH((vmf->vmf_flags & PUSH_MASK)
                            | PSL_IOPL | PSL_I, vmf);
                else
                        PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
                PUSH(vmf->vmf_cs, vmf);
                PUSH(vmf->vmf_ip + inc_ip + 1, vmf);    /* increment IP */
                GET_VEC(fuword((caddr_t)(i_byte * 4)),
                     &vmf->vmf_cs, &vmf->vmf_ip);
                vmf->vmf_flags &= ~PSL_T;
                vm86->vm86_eflags &= ~PSL_VIF;
                return (retcode);

        case IRET:
                vmf->vmf_ip = POP(vmf);
                vmf->vmf_cs = POP(vmf);
                temp_flags = POP(vmf) & POP_MASK;
                vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                    | temp_flags | PSL_VM | PSL_I;
                if (temp_flags & PSL_I) {
                        vm86->vm86_eflags |= PSL_VIF;
                        if (vm86->vm86_eflags & PSL_VIP)
                                break;
                } else {
                        vm86->vm86_eflags &= ~PSL_VIF;
                }
                return (retcode);

        case POPF:
                temp_flags = POP(vmf) & POP_MASK;
                vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                    | temp_flags | PSL_VM | PSL_I;
                vmf->vmf_ip += inc_ip;
                if (temp_flags & PSL_I) {
                        vm86->vm86_eflags |= PSL_VIF;
                        if (vm86->vm86_eflags & PSL_VIP)
                                break;
                } else {
                        vm86->vm86_eflags &= ~PSL_VIF;
                }
                return (retcode);
        }
        return (SIGBUS);
}

#define PGTABLE_SIZE    ((1024 + 64) * 1024 / PAGE_SIZE)
#define INTMAP_SIZE     32
#define IOMAP_SIZE      ctob(IOPAGES)
#define TSS_SIZE \
        (sizeof(struct pcb_ext) - sizeof(struct segment_descriptor) + \
         INTMAP_SIZE + IOMAP_SIZE + 1)

struct vm86_layout {
        pt_entry_t      vml_pgtbl[PGTABLE_SIZE];
        struct  pcb vml_pcb;
        struct  pcb_ext vml_ext;
        char    vml_intmap[INTMAP_SIZE];
        char    vml_iomap[IOMAP_SIZE];
        char    vml_iomap_trailer;
};

void
vm86_initialize(void)
{
        int i;
        u_int *addr;
        struct vm86_layout *vml = (struct vm86_layout *)vm86paddr;
        struct pcb *pcb;
        struct pcb_ext *ext;
        struct soft_segment_descriptor ssd = {
                0,                      /* segment base address (overwritten) */
                0,                      /* length (overwritten) */
                SDT_SYS386TSS,          /* segment type */
                0,                      /* priority level */
                1,                      /* descriptor present */
                0, 0,
                0,                      /* default 16 size */
                0                       /* granularity */
        };

        /*
         * this should be a compile time error, but cpp doesn't grok sizeof().
         */
        if (sizeof(struct vm86_layout) > ctob(3))
                panic("struct vm86_layout exceeds space allocated in locore.s");

        /*
         * Below is the memory layout that we use for the vm86 region.
         *
         * +--------+
         * |        |
         * |        |
         * | page 0 |
         * |        | +--------+
         * |        | | stack  |
         * +--------+ +--------+ <--------- vm86paddr
         * |        | |Page Tbl| 1M + 64K = 272 entries = 1088 bytes
         * |        | +--------+
         * |        | |  PCB   | size: ~240 bytes
         * | page 1 | |PCB Ext | size: ~140 bytes (includes TSS)
         * |        | +--------+
         * |        | |int map |
         * |        | +--------+
         * +--------+ |        |
         * | page 2 | |  I/O   |
         * +--------+ | bitmap |
         * | page 3 | |        |
         * |        | +--------+
         * +--------+
         */

        /*
         * A rudimentary PCB must be installed, in order to get to the
         * PCB extension area.  We use the PCB area as a scratchpad for
         * data storage, the layout of which is shown below.
         *
         * pcb_esi      = new PTD entry 0
         * pcb_ebp      = pointer to frame on vm86 stack
         * pcb_esp      = stack frame pointer at time of switch
         * pcb_ebx      = va of vm86 page table
         * pcb_eip      = argument pointer to initial call
         * pcb_spare[0] = saved TSS descriptor, word 0
         * pcb_spare[1] = saved TSS descriptor, word 1
         */
#define new_ptd         pcb_esi
#define vm86_frame      pcb_ebp
#define pgtable_va      pcb_ebx

        pcb = &vml->vml_pcb;
        ext = &vml->vml_ext;

        mtx_init(&vm86_lock, "vm86 lock", NULL, MTX_DEF);

        bzero(pcb, sizeof(struct pcb));
        pcb->new_ptd = vm86pa | PG_V | PG_RW | PG_U;
        pcb->vm86_frame = vm86paddr - sizeof(struct vm86frame);
        pcb->pgtable_va = vm86paddr;
        pcb->pcb_flags = PCB_VM86CALL;
        pcb->pcb_ext = ext;

        bzero(ext, sizeof(struct pcb_ext));
        ext->ext_tss.tss_esp0 = vm86paddr;
        ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
        ext->ext_tss.tss_ioopt =
                ((u_int)vml->vml_iomap - (u_int)&ext->ext_tss) << 16;
        ext->ext_iomap = vml->vml_iomap;
        ext->ext_vm86.vm86_intmap = vml->vml_intmap;

        if (cpu_feature & CPUID_VME)
                ext->ext_vm86.vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);

        addr = (u_int *)ext->ext_vm86.vm86_intmap;
        for (i = 0; i < (INTMAP_SIZE + IOMAP_SIZE) / sizeof(u_int); i++)
                *addr++ = 0;
        vml->vml_iomap_trailer = 0xff;

        ssd.ssd_base = (u_int)&ext->ext_tss;
        ssd.ssd_limit = TSS_SIZE - 1;
        ssdtosd(&ssd, &ext->ext_tssd);

        vm86pcb = pcb;

#if 0
        /*
         * use whatever is leftover of the vm86 page layout as a
         * message buffer so we can capture early output.
         */
        msgbufinit((vm_offset_t)vm86paddr + sizeof(struct vm86_layout),
            ctob(3) - sizeof(struct vm86_layout));
#endif
}

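/*
 * Map low-memory page numbers to the kernel virtual addresses that
 * shadow them in a struct vm86context.  vm86_getpage() returns the
 * kva registered for pagenum, or 0 if none; vm86_addpage() registers
 * a new page, allocating scratch memory with malloc(9) when the
 * caller does not supply a kva.
 */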
vm_offset_t
vm86_getpage(struct vm86context *vmc, int pagenum)
{
        int i;

        for (i = 0; i < vmc->npages; i++)
                if (vmc->pmap[i].pte_num == pagenum)
                        return (vmc->pmap[i].kva);
        return (0);
}

vm_offset_t
vm86_addpage(struct vm86context *vmc, int pagenum, vm_offset_t kva)
{
        int i, flags = 0;

        for (i = 0; i < vmc->npages; i++)
                if (vmc->pmap[i].pte_num == pagenum)
                        goto overlap;

        if (vmc->npages == VM86_PMAPSIZE)
                goto full;                      /* XXX grow map? */

        if (kva == 0) {
                kva = (vm_offset_t)malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
                flags = VMAP_MALLOC;
        }

        i = vmc->npages++;
        vmc->pmap[i].flags = flags;
        vmc->pmap[i].kva = kva;
        vmc->pmap[i].pte_num = pagenum;
        return (kva);
overlap:
        panic("vm86_addpage: overlap");
full:
        panic("vm86_addpage: not enough room");
}
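/*
 * Build the eflags value used to enter vm86 mode: only the user-
 * changeable bits (VME_USERCHANGE with VME, VM_USERCHANGE without)
 * are taken from the caller, and PSL_VM is forced on.  Without VME,
 * the virtual interrupt bits are saved in vm86_eflags for the
 * software emulator in vm86_emulate().
 */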
static void
vm86_initflags(struct vm86frame *vmf)
{
        int eflags = vmf->vmf_eflags;
        struct vm86_kernel *vm86 = &PCPU_GET(curpcb)->pcb_ext->ext_vm86;

        if (vm86->vm86_has_vme) {
                eflags = (vmf->vmf_eflags & ~VME_USERCHANGE) |
                    (eflags & VME_USERCHANGE) | PSL_VM;
        } else {
                vm86->vm86_eflags = eflags;     /* save VIF, VIP */
                eflags = (vmf->vmf_eflags & ~VM_USERCHANGE) |
                    (eflags & VM_USERCHANGE) | PSL_VM;
        }
        vmf->vmf_eflags = eflags | PSL_VM;
}

/*
 * called from vm86_bioscall, while in vm86 address space, to finalize setup.
 */
void
vm86_prepcall(struct vm86frame vmf)
{
        uintptr_t addr[] = { 0xA00, 0x1000 };   /* code, stack */
        u_char intcall[] = {
                CLI, INTn, 0x00, STI, HLT
        };

        if ((vmf.vmf_trapno & PAGE_MASK) <= 0xff) {
                /* interrupt call requested */
                intcall[2] = (u_char)(vmf.vmf_trapno & 0xff);
                memcpy((void *)addr[0], (void *)intcall, sizeof(intcall));
                vmf.vmf_ip = addr[0];
                vmf.vmf_cs = 0;
        }
        vmf.vmf_sp = addr[1] - 2;               /* keep aligned */
        vmf.kernel_fs = vmf.kernel_es = vmf.kernel_ds = 0;
        vmf.vmf_ss = 0;
        vmf.vmf_eflags = PSL_VIF | PSL_VM | PSL_USER;
        vm86_initflags(&vmf);
}

/*
 * vm86 trap handler; determines whether routine succeeded or not.
 * Called while in vm86 space, returns to calling process.
 */
void
vm86_trap(struct vm86frame *vmf)
{
        caddr_t addr;

        /* "should not happen" */
        if ((vmf->vmf_eflags & PSL_VM) == 0)
                panic("vm86_trap called, but not in vm86 mode");

        addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
        if (*(u_char *)addr == HLT)
                vmf->vmf_trapno = vmf->vmf_eflags & PSL_C;
        else
                vmf->vmf_trapno = vmf->vmf_trapno << 16;

        vm86_biosret(vmf);
}

int
vm86_intcall(int intnum, struct vm86frame *vmf)
{
        int retval;

        if (intnum < 0 || intnum > 0xff)
                return (EINVAL);

        vmf->vmf_trapno = intnum;
        mtx_lock(&vm86_lock);
        critical_enter();
        retval = vm86_bioscall(vmf);
        critical_exit();
        mtx_unlock(&vm86_lock);
        return (retval);
}
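
/*
 * Illustrative use of vm86_intcall() (a sketch, in the style of the
 * memory-size probes in i386/machdep.c): zero the frame, load the
 * real-mode registers for the BIOS service, then read the results
 * back out of the frame:
 *
 *      struct vm86frame vmf;
 *
 *      bzero(&vmf, sizeof(vmf));
 *      vmf.vmf_ah = 0x88;              (int 15h AH=88h: extended memory)
 *      if (vm86_intcall(0x15, &vmf) == 0)
 *              extmem = vmf.vmf_ax;    (size in KB)
 */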

/*
 * struct vm86context contains the page table to use when making
 * vm86 calls.  If intnum is a valid interrupt number (0-255), then
 * the "interrupt trampoline" will be used, otherwise we use the
 * caller's cs:ip routine.
 */
int
vm86_datacall(int intnum, struct vm86frame *vmf, struct vm86context *vmc)
{
        pt_entry_t *pte = (pt_entry_t *)vm86paddr;
        vm_paddr_t page;
        int i, entry, retval;

        mtx_lock(&vm86_lock);
        for (i = 0; i < vmc->npages; i++) {
                page = vtophys(vmc->pmap[i].kva & PG_FRAME);
                entry = vmc->pmap[i].pte_num;
                vmc->pmap[i].old_pte = pte[entry];
                pte[entry] = page | PG_V | PG_RW | PG_U;
                pmap_invalidate_page(kernel_pmap, vmc->pmap[i].kva);
        }

        vmf->vmf_trapno = intnum;
        critical_enter();
        retval = vm86_bioscall(vmf);
        critical_exit();

        for (i = 0; i < vmc->npages; i++) {
                entry = vmc->pmap[i].pte_num;
                pte[entry] = vmc->pmap[i].old_pte;
                pmap_invalidate_page(kernel_pmap, vmc->pmap[i].kva);
        }
        mtx_unlock(&vm86_lock);

        return (retval);
}
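
/*
 * Illustrative use of the vm86context interface (a sketch; the VESA
 * framebuffer driver follows a pattern along these lines): given a
 * hypothetical page-aligned buffer buf, register it as vm86 page 1,
 * derive a real-mode far pointer to it, and make the data call:
 *
 *      struct vm86context vmc;
 *      struct vm86frame vmf;
 *
 *      bzero(&vmf, sizeof(vmf));
 *      vmc.npages = 0;
 *      vm86_addpage(&vmc, 1, (vm_offset_t)buf);
 *      vm86_getptr(&vmc, (vm_offset_t)buf, &vmf.vmf_es, &vmf.vmf_di);
 *      vm86_datacall(0x10, &vmf, &vmc);
 */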

vm_offset_t
vm86_getaddr(struct vm86context *vmc, u_short sel, u_short off)
{
        int i, page;
        vm_offset_t addr;

        addr = (vm_offset_t)MAKE_ADDR(sel, off);
        page = addr >> PAGE_SHIFT;
        for (i = 0; i < vmc->npages; i++)
                if (page == vmc->pmap[i].pte_num)
                        return (vmc->pmap[i].kva + (addr & PAGE_MASK));
        return (0);
}

int
vm86_getptr(struct vm86context *vmc, vm_offset_t kva, u_short *sel,
    u_short *off)
{
        int i;

        for (i = 0; i < vmc->npages; i++)
                if (kva >= vmc->pmap[i].kva &&
                    kva < vmc->pmap[i].kva + PAGE_SIZE) {
                        *off = kva - vmc->pmap[i].kva;
                        *sel = vmc->pmap[i].pte_num << 8;
                        return (1);
                }
        return (0);
}

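/*
 * Kernel entry point for the i386_vm86(2) interface: dispatches the
 * VM86_* sub-operations (vm86 initialization, VME state queries, and
 * privileged direct BIOS interrupt calls) on behalf of user-mode vm86
 * consumers.
 */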
int
vm86_sysarch(struct thread *td, char *args)
{
        int error = 0;
        struct i386_vm86_args ua;
        struct vm86_kernel *vm86;

        if ((error = copyin(args, &ua, sizeof(struct i386_vm86_args))) != 0)
                return (error);

        if (td->td_pcb->pcb_ext == 0)
                if ((error = i386_extend_pcb(td)) != 0)
                        return (error);
        vm86 = &td->td_pcb->pcb_ext->ext_vm86;

        switch (ua.sub_op) {
        case VM86_INIT: {
                struct vm86_init_args sa;

                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
                        return (error);
                if (cpu_feature & CPUID_VME)
                        vm86->vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);
                else
                        vm86->vm86_has_vme = 0;
                vm86->vm86_inited = 1;
                vm86->vm86_debug = sa.debug;
                bcopy(&sa.int_map, vm86->vm86_intmap, 32);
                }
                break;

#if 0
        case VM86_SET_VME: {
                struct vm86_vme_args sa;

                if ((cpu_feature & CPUID_VME) == 0)
                        return (ENODEV);

                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
                        return (error);
                if (sa.state)
                        load_cr4(rcr4() | CR4_VME);
                else
                        load_cr4(rcr4() & ~CR4_VME);
                }
                break;
#endif

        case VM86_GET_VME: {
                struct vm86_vme_args sa;

                sa.state = (rcr4() & CR4_VME ? 1 : 0);
                error = copyout(&sa, ua.sub_args, sizeof(sa));
                }
                break;

        case VM86_INTCALL: {
                struct vm86_intcall_args sa;

                if ((error = suser(td)))
                        return (error);
                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))))
                        return (error);
                if ((error = vm86_intcall(sa.intnum, &sa.vmf)))
                        return (error);
                error = copyout(&sa, ua.sub_args, sizeof(sa));
                }
                break;

        default:
                error = EINVAL;
        }
        return (error);
}
