FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/vm86.c

/*-
 * Copyright (c) 1997 Jonathan Lemon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.0/sys/i386/i386/vm86.c 189004 2009-02-24 18:09:31Z rdivacky $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/psl.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>

extern int vm86pa;
extern struct pcb *vm86pcb;

static struct mtx vm86_lock;

extern int vm86_bioscall(struct vm86frame *);
extern void vm86_biosret(struct vm86frame *);

void vm86_prepcall(struct vm86frame *);

struct system_map {
        int             type;
        vm_offset_t     start;
        vm_offset_t     end;
};

#define HLT     0xf4
#define CLI     0xfa
#define STI     0xfb
#define PUSHF   0x9c
#define POPF    0x9d
#define INTn    0xcd
#define IRET    0xcf
#define CALLm   0xff
#define OPERAND_SIZE_PREFIX     0x66
#define ADDRESS_SIZE_PREFIX     0x67
#define PUSH_MASK       ~(PSL_VM | PSL_RF | PSL_I)
#define POP_MASK        ~(PSL_VIP | PSL_VIF | PSL_VM | PSL_RF | PSL_IOPL)

static __inline caddr_t
MAKE_ADDR(u_short sel, u_short off)
{
        return ((caddr_t)((sel << 4) + off));
}

static __inline void
GET_VEC(u_int vec, u_short *sel, u_short *off)
{
        *sel = vec >> 16;
        *off = vec & 0xffff;
}

static __inline u_int
MAKE_VEC(u_short sel, u_short off)
{
        return ((sel << 16) | off);
}
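
/*
 * Worked example (not in the original source): the real-mode interrupt
 * vector table stores each handler as a 32-bit sel:off pair at linear
 * address vector * 4.  For a vector value of 0xc0001234, GET_VEC() yields
 * sel = 0xc000 and off = 0x1234, and MAKE_ADDR(0xc000, 0x1234) computes
 * the linear address (0xc000 << 4) + 0x1234 = 0xc1234.
 */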

static __inline void
PUSH(u_short x, struct vm86frame *vmf)
{
        vmf->vmf_sp -= 2;
        suword16(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline void
PUSHL(u_int x, struct vm86frame *vmf)
{
        vmf->vmf_sp -= 4;
        suword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline u_short
POP(struct vm86frame *vmf)
{
        u_short x = fuword16(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

        vmf->vmf_sp += 2;
        return (x);
}

static __inline u_int
POPL(struct vm86frame *vmf)
{
        u_int x = fuword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

        vmf->vmf_sp += 4;
        return (x);
}

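/*
 * Emulate the instructions that trapped out of vm86 mode (the trap
 * handler hands general-protection faults taken with PSL_VM set to this
 * routine).  Returns 0 if the instruction was handled here, otherwise
 * the signal to deliver to the process.
 */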
int
vm86_emulate(struct vm86frame *vmf)
{
        struct vm86_kernel *vm86;
        caddr_t addr;
        u_char i_byte;
        u_int temp_flags;
        int inc_ip = 1;
        int retcode = 0;

        /*
         * pcb_ext contains the address of the extension area, or zero if
         * the extension is not present.  (This check should not be needed,
         * as we can't enter vm86 mode until we set up an extension area)
         */
        if (PCPU_GET(curpcb)->pcb_ext == 0)
                return (SIGBUS);
        vm86 = &PCPU_GET(curpcb)->pcb_ext->ext_vm86;

        if (vmf->vmf_eflags & PSL_T)
                retcode = SIGTRAP;

        addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
        i_byte = fubyte(addr);
        if (i_byte == ADDRESS_SIZE_PREFIX) {
                i_byte = fubyte(++addr);
                inc_ip++;
        }

        if (vm86->vm86_has_vme) {
                switch (i_byte) {
                case OPERAND_SIZE_PREFIX:
                        i_byte = fubyte(++addr);
                        inc_ip++;
                        switch (i_byte) {
                        case PUSHF:
                                if (vmf->vmf_eflags & PSL_VIF)
                                        PUSHL((vmf->vmf_eflags & PUSH_MASK)
                                            | PSL_IOPL | PSL_I, vmf);
                                else
                                        PUSHL((vmf->vmf_eflags & PUSH_MASK)
                                            | PSL_IOPL, vmf);
                                vmf->vmf_ip += inc_ip;
                                return (0);

                        case POPF:
                                temp_flags = POPL(vmf) & POP_MASK;
                                vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
                                    | temp_flags | PSL_VM | PSL_I;
                                vmf->vmf_ip += inc_ip;
                                if (temp_flags & PSL_I) {
                                        vmf->vmf_eflags |= PSL_VIF;
                                        if (vmf->vmf_eflags & PSL_VIP)
                                                break;
                                } else {
                                        vmf->vmf_eflags &= ~PSL_VIF;
                                }
                                return (0);
                        }
                        break;

                /* VME faults here if VIP is set, but does not set VIF. */
                case STI:
                        vmf->vmf_eflags |= PSL_VIF;
                        vmf->vmf_ip += inc_ip;
                        if ((vmf->vmf_eflags & PSL_VIP) == 0) {
                                uprintf("fatal sti\n");
                                return (SIGKILL);
                        }
                        break;

                /* VME if no redirection support */
                case INTn:
                        break;

                /* VME if trying to set PSL_TF, or PSL_I when VIP is set */
                case POPF:
                        temp_flags = POP(vmf) & POP_MASK;
                        vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        vmf->vmf_ip += inc_ip;
                        if (temp_flags & PSL_I) {
                                vmf->vmf_eflags |= PSL_VIF;
                                if (vmf->vmf_eflags & PSL_VIP)
                                        break;
                        } else {
                                vmf->vmf_eflags &= ~PSL_VIF;
                        }
                        return (retcode);

                /* VME if trying to set PSL_TF, or PSL_I when VIP is set */
                case IRET:
                        vmf->vmf_ip = POP(vmf);
                        vmf->vmf_cs = POP(vmf);
                        temp_flags = POP(vmf) & POP_MASK;
                        vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        if (temp_flags & PSL_I) {
                                vmf->vmf_eflags |= PSL_VIF;
                                if (vmf->vmf_eflags & PSL_VIP)
                                        break;
                        } else {
                                vmf->vmf_eflags &= ~PSL_VIF;
                        }
                        return (retcode);

                }
                return (SIGBUS);
        }

        switch (i_byte) {
        case OPERAND_SIZE_PREFIX:
                i_byte = fubyte(++addr);
                inc_ip++;
                switch (i_byte) {
                case PUSHF:
                        if (vm86->vm86_eflags & PSL_VIF)
                                PUSHL((vmf->vmf_flags & PUSH_MASK)
                                    | PSL_IOPL | PSL_I, vmf);
                        else
                                PUSHL((vmf->vmf_flags & PUSH_MASK)
                                    | PSL_IOPL, vmf);
                        vmf->vmf_ip += inc_ip;
                        return (retcode);

                case POPF:
                        temp_flags = POPL(vmf) & POP_MASK;
                        vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        vmf->vmf_ip += inc_ip;
                        if (temp_flags & PSL_I) {
                                vm86->vm86_eflags |= PSL_VIF;
                                if (vm86->vm86_eflags & PSL_VIP)
                                        break;
                        } else {
                                vm86->vm86_eflags &= ~PSL_VIF;
                        }
                        return (retcode);
                }
                return (SIGBUS);

        case CLI:
                vm86->vm86_eflags &= ~PSL_VIF;
                vmf->vmf_ip += inc_ip;
                return (retcode);

        case STI:
                /* if there is a pending interrupt, go to the emulator */
                vm86->vm86_eflags |= PSL_VIF;
                vmf->vmf_ip += inc_ip;
                if (vm86->vm86_eflags & PSL_VIP)
                        break;
                return (retcode);

        case PUSHF:
                if (vm86->vm86_eflags & PSL_VIF)
                        PUSH((vmf->vmf_flags & PUSH_MASK)
                            | PSL_IOPL | PSL_I, vmf);
                else
                        PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
                vmf->vmf_ip += inc_ip;
                return (retcode);

        case INTn:
                i_byte = fubyte(addr + 1);
                if ((vm86->vm86_intmap[i_byte >> 3] & (1 << (i_byte & 7))) != 0)
                        break;
                if (vm86->vm86_eflags & PSL_VIF)
                        PUSH((vmf->vmf_flags & PUSH_MASK)
                            | PSL_IOPL | PSL_I, vmf);
                else
                        PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
                PUSH(vmf->vmf_cs, vmf);
                PUSH(vmf->vmf_ip + inc_ip + 1, vmf);    /* increment IP */
                GET_VEC(fuword((caddr_t)(i_byte * 4)),
                     &vmf->vmf_cs, &vmf->vmf_ip);
                vmf->vmf_flags &= ~PSL_T;
                vm86->vm86_eflags &= ~PSL_VIF;
                return (retcode);

        case IRET:
                vmf->vmf_ip = POP(vmf);
                vmf->vmf_cs = POP(vmf);
                temp_flags = POP(vmf) & POP_MASK;
                vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                    | temp_flags | PSL_VM | PSL_I;
                if (temp_flags & PSL_I) {
                        vm86->vm86_eflags |= PSL_VIF;
                        if (vm86->vm86_eflags & PSL_VIP)
                                break;
                } else {
                        vm86->vm86_eflags &= ~PSL_VIF;
                }
                return (retcode);

        case POPF:
                temp_flags = POP(vmf) & POP_MASK;
                vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                    | temp_flags | PSL_VM | PSL_I;
                vmf->vmf_ip += inc_ip;
                if (temp_flags & PSL_I) {
                        vm86->vm86_eflags |= PSL_VIF;
                        if (vm86->vm86_eflags & PSL_VIP)
                                break;
                } else {
                        vm86->vm86_eflags &= ~PSL_VIF;
                }
                return (retcode);
        }
        return (SIGBUS);
}

#define PGTABLE_SIZE    ((1024 + 64) * 1024 / PAGE_SIZE)
#define INTMAP_SIZE     32
#define IOMAP_SIZE      ctob(IOPAGES)
#define TSS_SIZE \
        (sizeof(struct pcb_ext) - sizeof(struct segment_descriptor) + \
         INTMAP_SIZE + IOMAP_SIZE + 1)

struct vm86_layout {
        pt_entry_t      vml_pgtbl[PGTABLE_SIZE];
        struct  pcb vml_pcb;
        struct  pcb_ext vml_ext;
        char    vml_intmap[INTMAP_SIZE];
        char    vml_iomap[IOMAP_SIZE];
        char    vml_iomap_trailer;
};

void
vm86_initialize(void)
{
        int i;
        u_int *addr;
        struct vm86_layout *vml = (struct vm86_layout *)vm86paddr;
        struct pcb *pcb;
        struct pcb_ext *ext;
        struct soft_segment_descriptor ssd = {
                0,                      /* segment base address (overwritten) */
                0,                      /* length (overwritten) */
                SDT_SYS386TSS,          /* segment type */
                0,                      /* priority level */
                1,                      /* descriptor present */
                0, 0,
                0,                      /* default 16 size */
                0                       /* granularity */
        };

        /*
         * this should be a compile time error, but cpp doesn't grok sizeof().
         */
        if (sizeof(struct vm86_layout) > ctob(3))
                panic("struct vm86_layout exceeds space allocated in locore.s");

        /*
         * Below is the memory layout that we use for the vm86 region.
         *
         * +--------+
         * |        |
         * |        |
         * | page 0 |
         * |        | +--------+
         * |        | | stack  |
         * +--------+ +--------+ <--------- vm86paddr
         * |        | |Page Tbl| 1M + 64K = 272 entries = 1088 bytes
         * |        | +--------+
         * |        | |  PCB   | size: ~240 bytes
         * | page 1 | |PCB Ext | size: ~140 bytes (includes TSS)
         * |        | +--------+
         * |        | |int map |
         * |        | +--------+
         * +--------+ |        |
         * | page 2 | |  I/O   |
         * +--------+ | bitmap |
         * | page 3 | |        |
         * |        | +--------+
         * +--------+
         */

        /*
         * A rudimentary PCB must be installed, in order to get to the
         * PCB extension area.  We use the PCB area as a scratchpad for
         * data storage, the layout of which is shown below.
         *
         * pcb_esi      = new PTD entry 0
         * pcb_ebp      = pointer to frame on vm86 stack
         * pcb_esp      =    stack frame pointer at time of switch
         * pcb_ebx      = va of vm86 page table
         * pcb_eip      =    argument pointer to initial call
         * pcb_spare[0] =    saved TSS descriptor, word 0
         * pcb_spare[1] =    saved TSS descriptor, word 1
         */
#define new_ptd         pcb_esi
#define vm86_frame      pcb_ebp
#define pgtable_va      pcb_ebx

        pcb = &vml->vml_pcb;
        ext = &vml->vml_ext;

        mtx_init(&vm86_lock, "vm86 lock", NULL, MTX_DEF);

        bzero(pcb, sizeof(struct pcb));
        pcb->new_ptd = vm86pa | PG_V | PG_RW | PG_U;
        pcb->vm86_frame = vm86paddr - sizeof(struct vm86frame);
        pcb->pgtable_va = vm86paddr;
        pcb->pcb_flags = PCB_VM86CALL;
        pcb->pcb_ext = ext;

        bzero(ext, sizeof(struct pcb_ext));
        ext->ext_tss.tss_esp0 = vm86paddr;
        ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
        ext->ext_tss.tss_ioopt =
                ((u_int)vml->vml_iomap - (u_int)&ext->ext_tss) << 16;
        ext->ext_iomap = vml->vml_iomap;
        ext->ext_vm86.vm86_intmap = vml->vml_intmap;

        if (cpu_feature & CPUID_VME)
                ext->ext_vm86.vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);

        addr = (u_int *)ext->ext_vm86.vm86_intmap;
        for (i = 0; i < (INTMAP_SIZE + IOMAP_SIZE) / sizeof(u_int); i++)
                *addr++ = 0;
        vml->vml_iomap_trailer = 0xff;

        ssd.ssd_base = (u_int)&ext->ext_tss;
        ssd.ssd_limit = TSS_SIZE - 1;
        ssdtosd(&ssd, &ext->ext_tssd);

        vm86pcb = pcb;

#if 0
        /*
         * use whatever is leftover of the vm86 page layout as a
         * message buffer so we can capture early output.
         */
        msgbufinit((vm_offset_t)vm86paddr + sizeof(struct vm86_layout),
            ctob(3) - sizeof(struct vm86_layout));
#endif
}

vm_offset_t
vm86_getpage(struct vm86context *vmc, int pagenum)
{
        int i;

        for (i = 0; i < vmc->npages; i++)
                if (vmc->pmap[i].pte_num == pagenum)
                        return (vmc->pmap[i].kva);
        return (0);
}

vm_offset_t
vm86_addpage(struct vm86context *vmc, int pagenum, vm_offset_t kva)
{
        int i, flags = 0;

        for (i = 0; i < vmc->npages; i++)
                if (vmc->pmap[i].pte_num == pagenum)
                        goto overlap;

        if (vmc->npages == VM86_PMAPSIZE)
                goto full;                      /* XXX grow map? */

        if (kva == 0) {
                kva = (vm_offset_t)malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
                flags = VMAP_MALLOC;
        }

        i = vmc->npages++;
        vmc->pmap[i].flags = flags;
        vmc->pmap[i].kva = kva;
        vmc->pmap[i].pte_num = pagenum;
        return (kva);
overlap:
        panic("vm86_addpage: overlap");
full:
        panic("vm86_addpage: not enough room");
}

/*
 * called from vm86_bioscall, while in vm86 address space, to finalize setup.
 */
void
vm86_prepcall(struct vm86frame *vmf)
{
        uintptr_t addr[] = { 0xA00, 0x1000 };   /* code, stack */
        u_char intcall[] = {
                CLI, INTn, 0x00, STI, HLT
        };
        struct vm86_kernel *vm86;

        if ((vmf->vmf_trapno & PAGE_MASK) <= 0xff) {
                /* interrupt call requested */
                intcall[2] = (u_char)(vmf->vmf_trapno & 0xff);
                memcpy((void *)addr[0], (void *)intcall, sizeof(intcall));
                vmf->vmf_ip = addr[0];
                vmf->vmf_cs = 0;
        }
        vmf->vmf_sp = addr[1] - 2;              /* keep aligned */
        vmf->kernel_fs = vmf->kernel_es = vmf->kernel_ds = 0;
        vmf->vmf_ss = 0;
        vmf->vmf_eflags = PSL_VIF | PSL_VM | PSL_USER;

        vm86 = &PCPU_GET(curpcb)->pcb_ext->ext_vm86;
        if (!vm86->vm86_has_vme)
                vm86->vm86_eflags = vmf->vmf_eflags;  /* save VIF, VIP */
}

/*
 * vm86 trap handler; determines whether routine succeeded or not.
 * Called while in vm86 space, returns to calling process.
 */
void
vm86_trap(struct vm86frame *vmf)
{
        caddr_t addr;

        /* "should not happen" */
        if ((vmf->vmf_eflags & PSL_VM) == 0)
                panic("vm86_trap called, but not in vm86 mode");

        addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
        if (*(u_char *)addr == HLT)
                vmf->vmf_trapno = vmf->vmf_eflags & PSL_C;
        else
                vmf->vmf_trapno = vmf->vmf_trapno << 16;

        vm86_biosret(vmf);
}

int
vm86_intcall(int intnum, struct vm86frame *vmf)
{
        int retval;

        if (intnum < 0 || intnum > 0xff)
                return (EINVAL);

        vmf->vmf_trapno = intnum;
        mtx_lock(&vm86_lock);
        critical_enter();
        retval = vm86_bioscall(vmf);
        critical_exit();
        mtx_unlock(&vm86_lock);
        return (retval);
}
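
/*
 * Illustrative sketch (not part of the original file): a consumer builds a
 * zeroed vm86frame, sets any input registers, and lets vm86_intcall() run
 * the BIOS service.  BIOS INT 0x12, for instance, returns the base memory
 * size in KB in %ax.
 */
#if 0
static int
vm86_example_basemem(void)
{
        struct vm86frame vmf;

        bzero(&vmf, sizeof(vmf));               /* all registers start as 0 */
        if (vm86_intcall(0x12, &vmf) != 0)      /* run INT 0x12 in vm86 mode */
                return (0);
        return (vmf.vmf_ax);                    /* base memory size, in KB */
}
#endif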

/*
 * struct vm86context contains the page table to use when making
 * vm86 calls.  If intnum is a valid interrupt number (0-255), then
 * the "interrupt trampoline" will be used, otherwise we use the
 * caller's cs:ip routine.
 */
int
vm86_datacall(int intnum, struct vm86frame *vmf, struct vm86context *vmc)
{
        pt_entry_t *pte = (pt_entry_t *)vm86paddr;
        vm_paddr_t page;
        int i, entry, retval;

        mtx_lock(&vm86_lock);
        for (i = 0; i < vmc->npages; i++) {
                page = vtophys(vmc->pmap[i].kva & PG_FRAME);
                entry = vmc->pmap[i].pte_num;
                vmc->pmap[i].old_pte = pte[entry];
                pte[entry] = page | PG_V | PG_RW | PG_U;
                pmap_invalidate_page(kernel_pmap, vmc->pmap[i].kva);
        }

        vmf->vmf_trapno = intnum;
        critical_enter();
        retval = vm86_bioscall(vmf);
        critical_exit();

        for (i = 0; i < vmc->npages; i++) {
                entry = vmc->pmap[i].pte_num;
                pte[entry] = vmc->pmap[i].old_pte;
                pmap_invalidate_page(kernel_pmap, vmc->pmap[i].kva);
        }
        mtx_unlock(&vm86_lock);

        return (retval);
}
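
/*
 * Illustrative sketch (not part of the original file): using vm86_datacall()
 * with a vm86context to hand the BIOS a data buffer, in the style of the
 * SMAP (INT 0x15, %eax = 0xe820) probe in i386/machdep.c.  The buffer's
 * page is entered into the context, and vm86_getptr() converts its kernel
 * address into the %es:%di pair the BIOS expects.
 */
#if 0
static void
vm86_example_smap(void)
{
        struct vm86frame vmf;
        struct vm86context vmc;
        void *buf;

        bzero(&vmf, sizeof(vmf));
        vmc.npages = 0;
        buf = (void *)vm86_addpage(&vmc, 1, 0); /* page 1; kva of 0 => malloc */
        vm86_getptr(&vmc, (vm_offset_t)buf, &vmf.vmf_es, &vmf.vmf_di);
        vmf.vmf_eax = 0xe820;                   /* "get system memory map" */
        vmf.vmf_edx = 0x534d4150;               /* 'SMAP' signature */
        vmf.vmf_ecx = 20;                       /* size of one returned entry */
        vmf.vmf_ebx = 0;                        /* continuation: first entry */
        vm86_datacall(0x15, &vmf, &vmc);        /* results land in *buf */
}
#endif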
  616 
  617 vm_offset_t
  618 vm86_getaddr(struct vm86context *vmc, u_short sel, u_short off)
  619 {
  620         int i, page;
  621         vm_offset_t addr;
  622 
  623         addr = (vm_offset_t)MAKE_ADDR(sel, off);
  624         page = addr >> PAGE_SHIFT;
  625         for (i = 0; i < vmc->npages; i++)
  626                 if (page == vmc->pmap[i].pte_num)
  627                         return (vmc->pmap[i].kva + (addr & PAGE_MASK));
  628         return (0);
  629 }

int
vm86_getptr(struct vm86context *vmc, vm_offset_t kva, u_short *sel,
    u_short *off)
{
        int i;

        for (i = 0; i < vmc->npages; i++)
                if (kva >= vmc->pmap[i].kva &&
                    kva < vmc->pmap[i].kva + PAGE_SIZE) {
                        *off = kva - vmc->pmap[i].kva;
                        *sel = vmc->pmap[i].pte_num << 8;
                        return (1);
                }
        return (0);
}

int
vm86_sysarch(struct thread *td, char *args)
{
        int error = 0;
        struct i386_vm86_args ua;
        struct vm86_kernel *vm86;

        if ((error = copyin(args, &ua, sizeof(struct i386_vm86_args))) != 0)
                return (error);

        if (td->td_pcb->pcb_ext == 0)
                if ((error = i386_extend_pcb(td)) != 0)
                        return (error);
        vm86 = &td->td_pcb->pcb_ext->ext_vm86;

        switch (ua.sub_op) {
        case VM86_INIT: {
                struct vm86_init_args sa;

                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
                        return (error);
                if (cpu_feature & CPUID_VME)
                        vm86->vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);
                else
                        vm86->vm86_has_vme = 0;
                vm86->vm86_inited = 1;
                vm86->vm86_debug = sa.debug;
                bcopy(&sa.int_map, vm86->vm86_intmap, 32);
                }
                break;

#if 0
        case VM86_SET_VME: {
                struct vm86_vme_args sa;

                if ((cpu_feature & CPUID_VME) == 0)
                        return (ENODEV);

                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
                        return (error);
                if (sa.state)
                        load_cr4(rcr4() | CR4_VME);
                else
                        load_cr4(rcr4() & ~CR4_VME);
                }
                break;
#endif

        case VM86_GET_VME: {
                struct vm86_vme_args sa;

                sa.state = (rcr4() & CR4_VME ? 1 : 0);
                error = copyout(&sa, ua.sub_args, sizeof(sa));
                }
                break;

        case VM86_INTCALL: {
                struct vm86_intcall_args sa;

                if ((error = priv_check(td, PRIV_VM86_INTCALL)))
                        return (error);
                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))))
                        return (error);
                if ((error = vm86_intcall(sa.intnum, &sa.vmf)))
                        return (error);
                error = copyout(&sa, ua.sub_args, sizeof(sa));
                }
                break;

        default:
                error = EINVAL;
        }
        return (error);
}

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.