FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/vm86.c


/*-
 * Copyright (c) 1997 Jonathan Lemon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <sys/user.h>

#include <machine/md_var.h>
#include <machine/pcb_ext.h>    /* pcb.h included via sys/user.h */
#include <machine/psl.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>

extern int i386_extend_pcb      __P((struct proc *));
extern int vm86pa;
extern struct pcb *vm86pcb;

extern int vm86_bioscall(struct vm86frame *);
extern void vm86_biosret(struct vm86frame *);

void vm86_prepcall(struct vm86frame);

struct system_map {
        int             type;
        vm_offset_t     start;
        vm_offset_t     end;
};

#define HLT     0xf4
#define CLI     0xfa
#define STI     0xfb
#define PUSHF   0x9c
#define POPF    0x9d
#define INTn    0xcd
#define IRET    0xcf
#define CALLm   0xff
#define OPERAND_SIZE_PREFIX     0x66
#define ADDRESS_SIZE_PREFIX     0x67
#define PUSH_MASK       ~(PSL_VM | PSL_RF | PSL_I)
#define POP_MASK        ~(PSL_VIP | PSL_VIF | PSL_VM | PSL_RF | PSL_IOPL)

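/*
 * Real-mode address helpers.  A vm86 segment:offset pair refers to the
 * linear address (segment << 4) + offset; 0x1234:0x0010, for example,
 * names linear address 0x12350.  The PUSH/POP helpers below emulate
 * 16- and 32-bit stack operations on the vm86 stack through the
 * fubyte/fusword/fuword and susword/suword user-space access primitives.
 */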
static __inline caddr_t
MAKE_ADDR(u_short sel, u_short off)
{
        return ((caddr_t)((sel << 4) + off));
}

static __inline void
GET_VEC(u_int vec, u_short *sel, u_short *off)
{
        *sel = vec >> 16;
        *off = vec & 0xffff;
}

static __inline u_int
MAKE_VEC(u_short sel, u_short off)
{
        return ((sel << 16) | off);
}

static __inline void
PUSH(u_short x, struct vm86frame *vmf)
{
        vmf->vmf_sp -= 2;
        susword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline void
PUSHL(u_int x, struct vm86frame *vmf)
{
        vmf->vmf_sp -= 4;
        suword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline u_short
POP(struct vm86frame *vmf)
{
        u_short x = fusword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

        vmf->vmf_sp += 2;
        return (x);
}

static __inline u_int
POPL(struct vm86frame *vmf)
{
        u_int x = fuword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

        vmf->vmf_sp += 4;
        return (x);
}

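/*
 * Emulate the privileged instruction that faulted while the process was
 * running in vm86 mode.  Returns 0 (or SIGTRAP when single-stepping) if
 * the instruction was handled here, or another signal number, such as
 * SIGBUS, when the trap must be passed on to the process.
 */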
int
vm86_emulate(vmf)
        struct vm86frame *vmf;
{
        struct vm86_kernel *vm86;
        caddr_t addr;
        u_char i_byte;
        u_int temp_flags;
        int inc_ip = 1;
        int retcode = 0;

        /*
         * pcb_ext contains the address of the extension area, or zero if
         * the extension is not present.  (This check should not be needed,
         * as we can't enter vm86 mode until we set up an extension area.)
         */
        if (curpcb->pcb_ext == 0)
                return (SIGBUS);
        vm86 = &curpcb->pcb_ext->ext_vm86;

        if (vmf->vmf_eflags & PSL_T)
                retcode = SIGTRAP;

        addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
        i_byte = fubyte(addr);
        if (i_byte == ADDRESS_SIZE_PREFIX) {
                i_byte = fubyte(++addr);
                inc_ip++;
        }

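        /*
         * With CR4.VME enabled, the CPU maintains VIF/VIP directly in
         * EFLAGS and faults only on the cases it cannot virtualize, so
         * only those are handled here.  Without VME, the virtual
         * interrupt state is instead tracked in software in
         * vm86->vm86_eflags.
         */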
        if (vm86->vm86_has_vme) {
                switch (i_byte) {
                case OPERAND_SIZE_PREFIX:
                        i_byte = fubyte(++addr);
                        inc_ip++;
                        switch (i_byte) {
                        case PUSHF:
                                if (vmf->vmf_eflags & PSL_VIF)
                                        PUSHL((vmf->vmf_eflags & PUSH_MASK)
                                            | PSL_IOPL | PSL_I, vmf);
                                else
                                        PUSHL((vmf->vmf_eflags & PUSH_MASK)
                                            | PSL_IOPL, vmf);
                                vmf->vmf_ip += inc_ip;
                                return (0);

                        case POPF:
                                temp_flags = POPL(vmf) & POP_MASK;
                                vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
                                    | temp_flags | PSL_VM | PSL_I;
                                vmf->vmf_ip += inc_ip;
                                if (temp_flags & PSL_I) {
                                        vmf->vmf_eflags |= PSL_VIF;
                                        if (vmf->vmf_eflags & PSL_VIP)
                                                break;
                                } else {
                                        vmf->vmf_eflags &= ~PSL_VIF;
                                }
                                return (0);
                        }
                        break;

                /* VME faults here if VIP is set, but does not set VIF. */
                case STI:
                        vmf->vmf_eflags |= PSL_VIF;
                        vmf->vmf_ip += inc_ip;
                        if ((vmf->vmf_eflags & PSL_VIP) == 0) {
                                uprintf("fatal sti\n");
                                return (SIGKILL);
                        }
                        break;

                /* VME if no redirection support */
                case INTn:
                        break;

                /* VME if trying to set PSL_TF, or PSL_I when VIP is set */
                case POPF:
                        temp_flags = POP(vmf) & POP_MASK;
                        vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        vmf->vmf_ip += inc_ip;
                        if (temp_flags & PSL_I) {
                                vmf->vmf_eflags |= PSL_VIF;
                                if (vmf->vmf_eflags & PSL_VIP)
                                        break;
                        } else {
                                vmf->vmf_eflags &= ~PSL_VIF;
                        }
                        return (retcode);

                /* VME if trying to set PSL_TF, or PSL_I when VIP is set */
                case IRET:
                        vmf->vmf_ip = POP(vmf);
                        vmf->vmf_cs = POP(vmf);
                        temp_flags = POP(vmf) & POP_MASK;
                        vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        if (temp_flags & PSL_I) {
                                vmf->vmf_eflags |= PSL_VIF;
                                if (vmf->vmf_eflags & PSL_VIP)
                                        break;
                        } else {
                                vmf->vmf_eflags &= ~PSL_VIF;
                        }
                        return (retcode);

                }
                return (SIGBUS);
        }

        switch (i_byte) {
        case OPERAND_SIZE_PREFIX:
                i_byte = fubyte(++addr);
                inc_ip++;
                switch (i_byte) {
                case PUSHF:
                        if (vm86->vm86_eflags & PSL_VIF)
                                PUSHL((vmf->vmf_flags & PUSH_MASK)
                                    | PSL_IOPL | PSL_I, vmf);
                        else
                                PUSHL((vmf->vmf_flags & PUSH_MASK)
                                    | PSL_IOPL, vmf);
                        vmf->vmf_ip += inc_ip;
                        return (retcode);

                case POPF:
                        temp_flags = POPL(vmf) & POP_MASK;
                        vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        vmf->vmf_ip += inc_ip;
                        if (temp_flags & PSL_I) {
                                vm86->vm86_eflags |= PSL_VIF;
                                if (vm86->vm86_eflags & PSL_VIP)
                                        break;
                        } else {
                                vm86->vm86_eflags &= ~PSL_VIF;
                        }
                        return (retcode);
                }
                return (SIGBUS);

        case CLI:
                vm86->vm86_eflags &= ~PSL_VIF;
                vmf->vmf_ip += inc_ip;
                return (retcode);

        case STI:
                /* if there is a pending interrupt, go to the emulator */
                vm86->vm86_eflags |= PSL_VIF;
                vmf->vmf_ip += inc_ip;
                if (vm86->vm86_eflags & PSL_VIP)
                        break;
                return (retcode);

        case PUSHF:
                if (vm86->vm86_eflags & PSL_VIF)
                        PUSH((vmf->vmf_flags & PUSH_MASK)
                            | PSL_IOPL | PSL_I, vmf);
                else
                        PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
                vmf->vmf_ip += inc_ip;
                return (retcode);

        case INTn:
                i_byte = fubyte(addr + 1);
                if ((vm86->vm86_intmap[i_byte >> 3] & (1 << (i_byte & 7))) != 0)
                        break;
                if (vm86->vm86_eflags & PSL_VIF)
                        PUSH((vmf->vmf_flags & PUSH_MASK)
                            | PSL_IOPL | PSL_I, vmf);
                else
                        PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
                PUSH(vmf->vmf_cs, vmf);
                PUSH(vmf->vmf_ip + inc_ip + 1, vmf);    /* increment IP */
                GET_VEC(fuword((caddr_t)(i_byte * 4)),
                    &vmf->vmf_cs, &vmf->vmf_ip);
                vmf->vmf_flags &= ~PSL_T;
                vm86->vm86_eflags &= ~PSL_VIF;
                return (retcode);

        case IRET:
                vmf->vmf_ip = POP(vmf);
                vmf->vmf_cs = POP(vmf);
                temp_flags = POP(vmf) & POP_MASK;
                vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                    | temp_flags | PSL_VM | PSL_I;
                if (temp_flags & PSL_I) {
                        vm86->vm86_eflags |= PSL_VIF;
                        if (vm86->vm86_eflags & PSL_VIP)
                                break;
                } else {
                        vm86->vm86_eflags &= ~PSL_VIF;
                }
                return (retcode);

        case POPF:
                temp_flags = POP(vmf) & POP_MASK;
                vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                    | temp_flags | PSL_VM | PSL_I;
                vmf->vmf_ip += inc_ip;
                if (temp_flags & PSL_I) {
                        vm86->vm86_eflags |= PSL_VIF;
                        if (vm86->vm86_eflags & PSL_VIP)
                                break;
                } else {
                        vm86->vm86_eflags &= ~PSL_VIF;
                }
                return (retcode);
        }
        return (SIGBUS);
}

#define PGTABLE_SIZE    ((1024 + 64) * 1024 / PAGE_SIZE)
#define INTMAP_SIZE     32
#define IOMAP_SIZE      ctob(IOPAGES)
#define TSS_SIZE \
        (sizeof(struct pcb_ext) - sizeof(struct segment_descriptor) + \
         INTMAP_SIZE + IOMAP_SIZE + 1)

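/*
 * The page table covers the full 1M real-mode address space plus the
 * 64K wrap-around (HMA) area: (1024 + 64) * 1024 bytes / 4K pages =
 * 272 PTEs.  The layout below must fit in the space reserved for it in
 * locore.s; vm86_initialize() panics if it does not.
 */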
struct vm86_layout {
        pt_entry_t      vml_pgtbl[PGTABLE_SIZE];
        struct  pcb vml_pcb;
        struct  pcb_ext vml_ext;
        char    vml_intmap[INTMAP_SIZE];
        char    vml_iomap[IOMAP_SIZE];
        char    vml_iomap_trailer;
};

void
vm86_initialize(void)
{
        int i;
        u_int *addr;
        struct vm86_layout *vml = (struct vm86_layout *)vm86paddr;
        struct pcb *pcb;
        struct pcb_ext *ext;
        struct soft_segment_descriptor ssd = {
                0,                      /* segment base address (overwritten) */
                0,                      /* length (overwritten) */
                SDT_SYS386TSS,          /* segment type */
                0,                      /* priority level */
                1,                      /* descriptor present */
                0, 0,
                0,                      /* default 16 size */
                0                       /* granularity */
        };

        /*
         * This should be a compile-time error, but cpp doesn't grok sizeof().
         */
        if (sizeof(struct vm86_layout) > ctob(3))
                panic("struct vm86_layout exceeds space allocated in locore.s");

        /*
         * Below is the memory layout that we use for the vm86 region.
         *
         * +--------+
         * |        |
         * |        |
         * | page 0 |
         * |        | +--------+
         * |        | | stack  |
         * +--------+ +--------+ <--------- vm86paddr
         * |        | |Page Tbl| 1M + 64K = 272 entries = 1088 bytes
         * |        | +--------+
         * |        | |  PCB   | size: ~240 bytes
         * | page 1 | |PCB Ext | size: ~140 bytes (includes TSS)
         * |        | +--------+
         * |        | |int map |
         * |        | +--------+
         * +--------+ |        |
         * | page 2 | |  I/O   |
         * +--------+ | bitmap |
         * | page 3 | |        |
         * |        | +--------+
         * +--------+
         */

        /*
         * A rudimentary PCB must be installed, in order to get to the
         * PCB extension area.  We use the PCB area as a scratchpad for
         * data storage, the layout of which is shown below.
         *
         * pcb_esi      = new PTD entry 0
         * pcb_ebp      = pointer to frame on vm86 stack
         * pcb_esp      =    stack frame pointer at time of switch
         * pcb_ebx      = va of vm86 page table
         * pcb_eip      =    argument pointer to initial call
         * pcb_spare[0] =    saved TSS descriptor, word 0
         * pcb_spare[1] =    saved TSS descriptor, word 1
         */
#define new_ptd         pcb_esi
#define vm86_frame      pcb_ebp
#define pgtable_va      pcb_ebx

        pcb = &vml->vml_pcb;
        ext = &vml->vml_ext;

        bzero(pcb, sizeof(struct pcb));
        pcb->new_ptd = vm86pa | PG_V | PG_RW | PG_U;
        pcb->vm86_frame = vm86paddr - sizeof(struct vm86frame);
        pcb->pgtable_va = vm86paddr;
        pcb->pcb_ext = ext;

        bzero(ext, sizeof(struct pcb_ext));
        ext->ext_tss.tss_esp0 = vm86paddr;
        ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
        ext->ext_tss.tss_ioopt =
                ((u_int)vml->vml_iomap - (u_int)&ext->ext_tss) << 16;
        ext->ext_iomap = vml->vml_iomap;
        ext->ext_vm86.vm86_intmap = vml->vml_intmap;

        if (cpu_feature & CPUID_VME)
                ext->ext_vm86.vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);

        addr = (u_int *)ext->ext_vm86.vm86_intmap;
        for (i = 0; i < (INTMAP_SIZE + IOMAP_SIZE) / sizeof(u_int); i++)
                *addr++ = 0;
        vml->vml_iomap_trailer = 0xff;

        ssd.ssd_base = (u_int)&ext->ext_tss;
        ssd.ssd_limit = TSS_SIZE - 1;
        ssdtosd(&ssd, &ext->ext_tssd);

        vm86pcb = pcb;

#if 0
        /*
         * use whatever is leftover of the vm86 page layout as a
         * message buffer so we can capture early output.
         */
        msgbufinit((vm_offset_t)vm86paddr + sizeof(struct vm86_layout),
            ctob(3) - sizeof(struct vm86_layout));
#endif
}

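/*
 * A vm86context records which kernel pages are mapped into the vm86
 * address space and at which real-mode page numbers.  vm86_getpage()
 * looks a page up, and vm86_addpage() registers a new one, allocating
 * a scratch page with malloc(9) when no kva is supplied.
 */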
vm_offset_t
vm86_getpage(struct vm86context *vmc, int pagenum)
{
        int i;

        for (i = 0; i < vmc->npages; i++)
                if (vmc->pmap[i].pte_num == pagenum)
                        return (vmc->pmap[i].kva);
        return (0);
}

vm_offset_t
vm86_addpage(struct vm86context *vmc, int pagenum, vm_offset_t kva)
{
        int i, flags = 0;

        for (i = 0; i < vmc->npages; i++)
                if (vmc->pmap[i].pte_num == pagenum)
                        goto bad;

        if (vmc->npages == VM86_PMAPSIZE)
                goto bad;                       /* XXX grow map? */

        if (kva == 0) {
                kva = (vm_offset_t)malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
                flags = VMAP_MALLOC;
        }

        i = vmc->npages++;
        vmc->pmap[i].flags = flags;
        vmc->pmap[i].kva = kva;
        vmc->pmap[i].pte_num = pagenum;
        return (kva);
bad:
        panic("vm86_addpage: not enough room, or overlap");
}

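/*
 * Build the EFLAGS image used while in vm86 mode: only the bits a vm86
 * process is allowed to change (VME_USERCHANGE or VM_USERCHANGE) are
 * taken from the caller, and PSL_VM is forced on.  Without VME, the
 * virtual interrupt flags are saved in vm86_eflags for the software
 * emulation above.
 */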
static void
vm86_initflags(struct vm86frame *vmf)
{
        int eflags = vmf->vmf_eflags;
        struct vm86_kernel *vm86 = &curpcb->pcb_ext->ext_vm86;

        if (vm86->vm86_has_vme) {
                eflags = (vmf->vmf_eflags & ~VME_USERCHANGE) |
                    (eflags & VME_USERCHANGE) | PSL_VM;
        } else {
                vm86->vm86_eflags = eflags;     /* save VIF, VIP */
                eflags = (vmf->vmf_eflags & ~VM_USERCHANGE) |
                    (eflags & VM_USERCHANGE) | PSL_VM;
        }
        vmf->vmf_eflags = eflags | PSL_VM;
}

/*
 * called from vm86_bioscall, while in vm86 address space, to finalize setup.
 */
void
vm86_prepcall(struct vm86frame vmf)
{
        uintptr_t addr[] = { 0xA00, 0x1000 };   /* code, stack */
        u_char intcall[] = {
                CLI, INTn, 0x00, STI, HLT
        };

        if ((vmf.vmf_trapno & PAGE_MASK) <= 0xff) {
                /* interrupt call requested */
                intcall[2] = (u_char)(vmf.vmf_trapno & 0xff);
                memcpy((void *)addr[0], (void *)intcall, sizeof(intcall));
                vmf.vmf_ip = addr[0];
                vmf.vmf_cs = 0;
        }
        vmf.vmf_sp = addr[1] - 2;               /* keep aligned */
        vmf.kernel_fs = vmf.kernel_es = vmf.kernel_ds = 0;
        vmf.vmf_ss = 0;
        vmf.vmf_eflags = PSL_VIF | PSL_VM | PSL_USER;
        vm86_initflags(&vmf);
}

/*
 * vm86 trap handler; determines whether routine succeeded or not.
 * Called while in vm86 space, returns to calling process.
 */
void
vm86_trap(struct vm86frame *vmf)
{
        caddr_t addr;

        /* "should not happen" */
        if ((vmf->vmf_eflags & PSL_VM) == 0)
                panic("vm86_trap called, but not in vm86 mode");

        addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
        if (*(u_char *)addr == HLT)
                vmf->vmf_trapno = vmf->vmf_eflags & PSL_C;
        else
                vmf->vmf_trapno = vmf->vmf_trapno << 16;

        vm86_biosret(vmf);
}

int
vm86_intcall(int intnum, struct vm86frame *vmf)
{
        if (intnum < 0 || intnum > 0xff)
                return (EINVAL);

        vmf->vmf_trapno = intnum;
        return (vm86_bioscall(vmf));
}

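/*
 * Usage sketch (illustrative only): a caller wanting the BIOS equipment
 * list, which int 0x11 returns in AX, might do
 *
 *      struct vm86frame vmf;
 *
 *      bzero(&vmf, sizeof(vmf));
 *      vm86_intcall(0x11, &vmf);
 *      equipment = vmf.vmf_ax;
 *
 * Register values for the call are placed in the frame beforehand and
 * read back from it on return.
 */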
/*
 * struct vm86context contains the page table to use when making
 * vm86 calls.  If intnum is a valid interrupt number (0-255), then
 * the "interrupt trampoline" will be used, otherwise we use the
 * caller's cs:ip routine.
 */
int
vm86_datacall(intnum, vmf, vmc)
        int intnum;
        struct vm86frame *vmf;
        struct vm86context *vmc;
{
        pt_entry_t *pte = (pt_entry_t *)vm86paddr;
        vm_paddr_t page;
        int i, entry, retval;

        for (i = 0; i < vmc->npages; i++) {
                page = vtophys(vmc->pmap[i].kva & PG_FRAME);
                entry = vmc->pmap[i].pte_num;
                vmc->pmap[i].old_pte = pte[entry];
                pte[entry] = page | PG_V | PG_RW | PG_U;
        }

        vmf->vmf_trapno = intnum;
        retval = vm86_bioscall(vmf);

        for (i = 0; i < vmc->npages; i++) {
                entry = vmc->pmap[i].pte_num;
                pte[entry] = vmc->pmap[i].old_pte;
        }

        return (retval);
}

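/*
 * Usage sketch (illustrative only): to pass a data buffer to a BIOS
 * call, map a scratch page into the vm86 space, convert its kernel
 * address to a real-mode pointer, and call through the trampoline:
 *
 *      struct vm86context vmc;
 *      struct vm86frame vmf;
 *      vm_offset_t buf;
 *
 *      bzero(&vmc, sizeof(vmc));
 *      bzero(&vmf, sizeof(vmf));
 *      buf = vm86_addpage(&vmc, 1, 0);         page 1, malloc'ed kva
 *      vm86_getptr(&vmc, buf, &vmf.vmf_es, &vmf.vmf_di);
 *      vm86_datacall(0x10, &vmf, &vmc);
 */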
vm_offset_t
vm86_getaddr(vmc, sel, off)
        struct vm86context *vmc;
        u_short sel;
        u_short off;
{
        int i, page;
        vm_offset_t addr;

        addr = (vm_offset_t)MAKE_ADDR(sel, off);
        page = addr >> PAGE_SHIFT;
        for (i = 0; i < vmc->npages; i++)
                if (page == vmc->pmap[i].pte_num)
                        return (vmc->pmap[i].kva + (addr & PAGE_MASK));
        return (0);
}

int
vm86_getptr(vmc, kva, sel, off)
        struct vm86context *vmc;
        vm_offset_t kva;
        u_short *sel;
        u_short *off;
{
        int i;

        for (i = 0; i < vmc->npages; i++)
                if (kva >= vmc->pmap[i].kva &&
                    kva < vmc->pmap[i].kva + PAGE_SIZE) {
                        *off = kva - vmc->pmap[i].kva;
                        *sel = vmc->pmap[i].pte_num << 8;
                        return (1);
                }
        return (0);
}

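/*
 * Backend for the I386_VM86 sysarch(2) operation: copies the argument
 * block in from user space, lazily creates the PCB extension if it does
 * not yet exist, and dispatches on the requested sub-operation.
 */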
int
vm86_sysarch(p, args)
        struct proc *p;
        char *args;
{
        int error = 0;
        struct i386_vm86_args ua;
        struct vm86_kernel *vm86;

        if ((error = copyin(args, &ua, sizeof(struct i386_vm86_args))) != 0)
                return (error);

        if (p->p_addr->u_pcb.pcb_ext == 0)
                if ((error = i386_extend_pcb(p)) != 0)
                        return (error);
        vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;

        switch (ua.sub_op) {
        case VM86_INIT: {
                struct vm86_init_args sa;

                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
                        return (error);
                if (cpu_feature & CPUID_VME)
                        vm86->vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);
                else
                        vm86->vm86_has_vme = 0;
                vm86->vm86_inited = 1;
                vm86->vm86_debug = sa.debug;
                bcopy(&sa.int_map, vm86->vm86_intmap, 32);
                }
                break;

#if 0
        case VM86_SET_VME: {
                struct vm86_vme_args sa;

                if ((cpu_feature & CPUID_VME) == 0)
                        return (ENODEV);

                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
                        return (error);
                if (sa.state)
                        load_cr4(rcr4() | CR4_VME);
                else
                        load_cr4(rcr4() & ~CR4_VME);
                }
                break;
#endif

        case VM86_GET_VME: {
                struct vm86_vme_args sa;

                sa.state = (rcr4() & CR4_VME ? 1 : 0);
                error = copyout(&sa, ua.sub_args, sizeof(sa));
                }
                break;

        case VM86_INTCALL: {
                struct vm86_intcall_args sa;

                if ((error = suser(p)) != 0)
                        return (error);
                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
                        return (error);
                if ((error = vm86_intcall(sa.intnum, &sa.vmf)) != 0)
                        return (error);
                error = copyout(&sa, ua.sub_args, sizeof(sa));
                }
                break;

        default:
                error = EINVAL;
        }
        return (error);
}
