FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/vm86.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 1997 Jonathan Lemon
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/priv.h>
35 #include <sys/proc.h>
36 #include <sys/lock.h>
37 #include <sys/malloc.h>
38 #include <sys/mutex.h>
39
40 #include <vm/vm.h>
41 #include <vm/pmap.h>
42 #include <vm/vm_map.h>
43 #include <vm/vm_page.h>
44
45 #include <machine/md_var.h>
46 #include <machine/pcb.h>
47 #include <machine/pcb_ext.h>
48 #include <machine/psl.h>
49 #include <machine/specialreg.h>
50 #include <machine/sysarch.h>
51
52 extern int vm86pa;
53 extern struct pcb *vm86pcb;
54
55 static struct mtx vm86_lock;
56
57 extern int vm86_bioscall(struct vm86frame *);
58 extern void vm86_biosret(struct vm86frame *);
59
60 void vm86_prepcall(struct vm86frame *);
61
62 struct system_map {
63 int type;
64 vm_offset_t start;
65 vm_offset_t end;
66 };
67
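/*
 * Opcode bytes and instruction prefixes recognized by vm86_emulate()
 * below, together with the EFLAGS masks used when a flags image is
 * pushed onto or popped off the vm86 stack: PUSH_MASK strips VM, RF and
 * the real IF from the pushed image (the virtual IF is substituted),
 * while POP_MASK keeps a popped image from changing VIF, VIP, VM, RF or
 * IOPL.
 */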
68 #define HLT 0xf4
69 #define CLI 0xfa
70 #define STI 0xfb
71 #define PUSHF 0x9c
72 #define POPF 0x9d
73 #define INTn 0xcd
74 #define IRET 0xcf
75 #define CALLm 0xff
76 #define OPERAND_SIZE_PREFIX 0x66
77 #define ADDRESS_SIZE_PREFIX 0x67
78 #define PUSH_MASK ~(PSL_VM | PSL_RF | PSL_I)
79 #define POP_MASK ~(PSL_VIP | PSL_VIF | PSL_VM | PSL_RF | PSL_IOPL)
80
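/*
 * Wrappers around the fetch/store primitives used to touch vm86 memory.
 * When running inside a critical section (td_critnest != 0), as during a
 * trampoline-based BIOS call, the fault-catching fuword()/suword()
 * routines are bypassed and the low-memory address is dereferenced
 * directly.
 */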
81 static int
82 vm86_suword16(volatile void *base, int word)
83 {
84
85 if (curthread->td_critnest != 0) {
86 *(volatile uint16_t *)base = word;
87 return (0);
88 }
89 return (suword16(base, word));
90 }
91
92 static int
93 vm86_suword(volatile void *base, long word)
94 {
95
96 if (curthread->td_critnest != 0) {
97 *(volatile long *)base = word;
98 return (0);
99 }
100 return (suword(base, word));
101 }
102
103 static int
104 vm86_fubyte(volatile const void *base)
105 {
106
107 if (curthread->td_critnest != 0)
108 return (*(volatile const u_char *)base);
109 return (fubyte(base));
110 }
111
112 static int
113 vm86_fuword16(volatile const void *base)
114 {
115
116 if (curthread->td_critnest != 0)
117 return (*(volatile const uint16_t *)base);
118 return (fuword16(base));
119 }
120
121 static long
122 vm86_fuword(volatile const void *base)
123 {
124
125 if (curthread->td_critnest != 0)
126 return (*(volatile const long *)base);
127 return (fuword(base));
128 }
129
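/*
 * Helpers for real-mode addressing: a linear address is segment * 16 +
 * offset, and an interrupt vector is stored as a 32-bit segment:offset
 * pair.  The PUSH/POP helpers below operate on the vm86 stack addressed
 * through vmf_ss:vmf_sp.
 */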
130 static __inline caddr_t
131 MAKE_ADDR(u_short sel, u_short off)
132 {
133 return ((caddr_t)((sel << 4) + off));
134 }
135
136 static __inline void
137 GET_VEC(u_int vec, u_short *sel, u_short *off)
138 {
139 *sel = vec >> 16;
140 *off = vec & 0xffff;
141 }
142
143 static __inline u_int
144 MAKE_VEC(u_short sel, u_short off)
145 {
146 return ((sel << 16) | off);
147 }
148
149 static __inline void
150 PUSH(u_short x, struct vm86frame *vmf)
151 {
152 vmf->vmf_sp -= 2;
153 vm86_suword16(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
154 }
155
156 static __inline void
157 PUSHL(u_int x, struct vm86frame *vmf)
158 {
159 vmf->vmf_sp -= 4;
160 vm86_suword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
161 }
162
163 static __inline u_short
164 POP(struct vm86frame *vmf)
165 {
166 u_short x = vm86_fuword16(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));
167
168 vmf->vmf_sp += 2;
169 return (x);
170 }
171
172 static __inline u_int
173 POPL(struct vm86frame *vmf)
174 {
175 u_int x = vm86_fuword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));
176
177 vmf->vmf_sp += 4;
178 return (x);
179 }
180
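/*
 * Emulate the instructions that fault in vm86 mode because they touch the
 * interrupt flag: CLI, STI, PUSHF, POPF, INTn and IRET, optionally behind
 * an operand- or address-size prefix.  A virtual interrupt flag is kept in
 * vm86_eflags (or in the hardware VIF when VME is available).  Returns 0
 * (or SIGTRAP when single-stepping) when the instruction was handled here,
 * and a signal number such as SIGBUS for anything it does not handle.
 */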
181 int
182 vm86_emulate(struct vm86frame *vmf)
183 {
184 struct vm86_kernel *vm86;
185 caddr_t addr;
186 u_char i_byte;
187 u_int temp_flags;
188 int inc_ip = 1;
189 int retcode = 0;
190
191 /*
192 * pcb_ext contains the address of the extension area, or zero if
193 * the extension is not present. (This check should not be needed,
194 * as we can't enter vm86 mode until we set up an extension area)
195 */
196 if (curpcb->pcb_ext == 0)
197 return (SIGBUS);
198 vm86 = &curpcb->pcb_ext->ext_vm86;
199
200 if (vmf->vmf_eflags & PSL_T)
201 retcode = SIGTRAP;
202
203 addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
204 i_byte = vm86_fubyte(addr);
205 if (i_byte == ADDRESS_SIZE_PREFIX) {
206 i_byte = vm86_fubyte(++addr);
207 inc_ip++;
208 }
209
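	/*
	 * With the CPU's virtual-8086 mode extensions (VME) enabled, the
	 * hardware maintains VIF/VIP itself and these instructions fault
	 * only in the corner cases noted below, so only those cases need
	 * software assistance here.
	 */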
210 if (vm86->vm86_has_vme) {
211 switch (i_byte) {
212 case OPERAND_SIZE_PREFIX:
213 i_byte = vm86_fubyte(++addr);
214 inc_ip++;
215 switch (i_byte) {
216 case PUSHF:
217 if (vmf->vmf_eflags & PSL_VIF)
218 PUSHL((vmf->vmf_eflags & PUSH_MASK)
219 | PSL_IOPL | PSL_I, vmf);
220 else
221 PUSHL((vmf->vmf_eflags & PUSH_MASK)
222 | PSL_IOPL, vmf);
223 vmf->vmf_ip += inc_ip;
224 return (retcode);
225
226 case POPF:
227 temp_flags = POPL(vmf) & POP_MASK;
228 vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
229 | temp_flags | PSL_VM | PSL_I;
230 vmf->vmf_ip += inc_ip;
231 if (temp_flags & PSL_I) {
232 vmf->vmf_eflags |= PSL_VIF;
233 if (vmf->vmf_eflags & PSL_VIP)
234 break;
235 } else {
236 vmf->vmf_eflags &= ~PSL_VIF;
237 }
238 return (retcode);
239 }
240 break;
241
242 /* VME faults here if VIP is set, but does not set VIF. */
243 case STI:
244 vmf->vmf_eflags |= PSL_VIF;
245 vmf->vmf_ip += inc_ip;
246 if ((vmf->vmf_eflags & PSL_VIP) == 0) {
247 uprintf("fatal sti\n");
248 return (SIGKILL);
249 }
250 break;
251
252 /* VME if no redirection support */
253 case INTn:
254 break;
255
256 /* VME if trying to set PSL_T, or PSL_I when VIP is set */
257 case POPF:
258 temp_flags = POP(vmf) & POP_MASK;
259 vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
260 | temp_flags | PSL_VM | PSL_I;
261 vmf->vmf_ip += inc_ip;
262 if (temp_flags & PSL_I) {
263 vmf->vmf_eflags |= PSL_VIF;
264 if (vmf->vmf_eflags & PSL_VIP)
265 break;
266 } else {
267 vmf->vmf_eflags &= ~PSL_VIF;
268 }
269 return (retcode);
270
271 /* VME if trying to set PSL_T, or PSL_I when VIP is set */
272 case IRET:
273 vmf->vmf_ip = POP(vmf);
274 vmf->vmf_cs = POP(vmf);
275 temp_flags = POP(vmf) & POP_MASK;
276 vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
277 | temp_flags | PSL_VM | PSL_I;
278 if (temp_flags & PSL_I) {
279 vmf->vmf_eflags |= PSL_VIF;
280 if (vmf->vmf_eflags & PSL_VIP)
281 break;
282 } else {
283 vmf->vmf_eflags &= ~PSL_VIF;
284 }
285 return (retcode);
286
287 }
288 return (SIGBUS);
289 }
290
291 switch (i_byte) {
292 case OPERAND_SIZE_PREFIX:
293 i_byte = vm86_fubyte(++addr);
294 inc_ip++;
295 switch (i_byte) {
296 case PUSHF:
297 if (vm86->vm86_eflags & PSL_VIF)
298 PUSHL((vmf->vmf_flags & PUSH_MASK)
299 | PSL_IOPL | PSL_I, vmf);
300 else
301 PUSHL((vmf->vmf_flags & PUSH_MASK)
302 | PSL_IOPL, vmf);
303 vmf->vmf_ip += inc_ip;
304 return (retcode);
305
306 case POPF:
307 temp_flags = POPL(vmf) & POP_MASK;
308 vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
309 | temp_flags | PSL_VM | PSL_I;
310 vmf->vmf_ip += inc_ip;
311 if (temp_flags & PSL_I) {
312 vm86->vm86_eflags |= PSL_VIF;
313 if (vm86->vm86_eflags & PSL_VIP)
314 break;
315 } else {
316 vm86->vm86_eflags &= ~PSL_VIF;
317 }
318 return (retcode);
319 }
320 return (SIGBUS);
321
322 case CLI:
323 vm86->vm86_eflags &= ~PSL_VIF;
324 vmf->vmf_ip += inc_ip;
325 return (retcode);
326
327 case STI:
328 /* if there is a pending interrupt, go to the emulator */
329 vm86->vm86_eflags |= PSL_VIF;
330 vmf->vmf_ip += inc_ip;
331 if (vm86->vm86_eflags & PSL_VIP)
332 break;
333 return (retcode);
334
335 case PUSHF:
336 if (vm86->vm86_eflags & PSL_VIF)
337 PUSH((vmf->vmf_flags & PUSH_MASK)
338 | PSL_IOPL | PSL_I, vmf);
339 else
340 PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
341 vmf->vmf_ip += inc_ip;
342 return (retcode);
343
344 case INTn:
345 i_byte = vm86_fubyte(addr + 1);
346 if ((vm86->vm86_intmap[i_byte >> 3] & (1 << (i_byte & 7))) != 0)
347 break;
348 if (vm86->vm86_eflags & PSL_VIF)
349 PUSH((vmf->vmf_flags & PUSH_MASK)
350 | PSL_IOPL | PSL_I, vmf);
351 else
352 PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
353 PUSH(vmf->vmf_cs, vmf);
354 PUSH(vmf->vmf_ip + inc_ip + 1, vmf); /* increment IP */
355 GET_VEC(vm86_fuword((caddr_t)(i_byte * 4)),
356 &vmf->vmf_cs, &vmf->vmf_ip);
357 vmf->vmf_flags &= ~PSL_T;
358 vm86->vm86_eflags &= ~PSL_VIF;
359 return (retcode);
360
361 case IRET:
362 vmf->vmf_ip = POP(vmf);
363 vmf->vmf_cs = POP(vmf);
364 temp_flags = POP(vmf) & POP_MASK;
365 vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
366 | temp_flags | PSL_VM | PSL_I;
367 if (temp_flags & PSL_I) {
368 vm86->vm86_eflags |= PSL_VIF;
369 if (vm86->vm86_eflags & PSL_VIP)
370 break;
371 } else {
372 vm86->vm86_eflags &= ~PSL_VIF;
373 }
374 return (retcode);
375
376 case POPF:
377 temp_flags = POP(vmf) & POP_MASK;
378 vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
379 | temp_flags | PSL_VM | PSL_I;
380 vmf->vmf_ip += inc_ip;
381 if (temp_flags & PSL_I) {
382 vm86->vm86_eflags |= PSL_VIF;
383 if (vm86->vm86_eflags & PSL_VIP)
384 break;
385 } else {
386 vm86->vm86_eflags &= ~PSL_VIF;
387 }
388 return (retcode);
389 }
390 return (SIGBUS);
391 }
392
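/*
 * Sizes of the pieces of the vm86 region described by struct vm86_layout
 * below: one page-table entry per page of the 1M + 64K of vm86-visible
 * address space (272 entries), a 256-bit interrupt redirection bitmap, the
 * I/O permission bitmap, and a TSS large enough to hold both bitmaps plus
 * the terminating 0xff byte.
 */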
393 #define PGTABLE_SIZE ((1024 + 64) * 1024 / PAGE_SIZE)
394 #define INTMAP_SIZE 32
395 #define IOMAP_SIZE ctob(IOPAGES)
396 #define TSS_SIZE \
397 (sizeof(struct pcb_ext) - sizeof(struct segment_descriptor) + \
398 INTMAP_SIZE + IOMAP_SIZE + 1)
399
400 struct vm86_layout {
401 pt_entry_t vml_pgtbl[PGTABLE_SIZE];
402 struct pcb vml_pcb;
403 struct pcb_ext vml_ext;
404 char vml_intmap[INTMAP_SIZE];
405 char vml_iomap[IOMAP_SIZE];
406 char vml_iomap_trailer;
407 };
408
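/*
 * One-time setup of the vm86 area reserved by locore: lay out the page
 * table, scratch PCB, PCB extension/TSS and bitmaps according to struct
 * vm86_layout, and record the PCB in vm86pcb for the BIOS-call trampoline
 * to switch to.
 */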
409 void
410 vm86_initialize(void)
411 {
412 int i;
413 u_int *addr;
414 struct vm86_layout *vml = (struct vm86_layout *)vm86paddr;
415 struct pcb *pcb;
416 struct pcb_ext *ext;
417 struct soft_segment_descriptor ssd = {
418 0, /* segment base address (overwritten) */
419 0, /* length (overwritten) */
420 SDT_SYS386TSS, /* segment type */
421 0, /* priority level */
422 1, /* descriptor present */
423 0, 0,
424 0, /* default 16 size */
425 0 /* granularity */
426 };
427
428 /*
429 * this should be a compile time error, but cpp doesn't grok sizeof().
430 */
431 if (sizeof(struct vm86_layout) > ctob(3))
432 panic("struct vm86_layout exceeds space allocated in locore.s");
433
434 /*
435 * Below is the memory layout that we use for the vm86 region.
436 *
437 * +--------+
438 * | |
439 * | |
440 * | page 0 |
441 * | | +--------+
442 * | | | stack |
443 * +--------+ +--------+ <--------- vm86paddr
444 * | | |Page Tbl| 1M + 64K = 272 entries = 1088 bytes
445 * | | +--------+
446 * | | | PCB | size: ~240 bytes
447 * | page 1 | |PCB Ext | size: ~140 bytes (includes TSS)
448 * | | +--------+
449 * | | |int map |
450 * | | +--------+
451 * +--------+ | |
452 * | page 2 | | I/O |
453 * +--------+ | bitmap |
454 * | page 3 | | |
455 * | | +--------+
456 * +--------+
457 */
458
459 /*
460 * A rudimentary PCB must be installed, in order to get to the
461 * PCB extension area. We use the PCB area as a scratchpad for
462 * data storage, the layout of which is shown below.
463 *
464 * pcb_esi = new PTD entry 0
465 * pcb_ebp = pointer to frame on vm86 stack
466 * pcb_esp = stack frame pointer at time of switch
467 * pcb_ebx = va of vm86 page table
468 * pcb_eip = argument pointer to initial call
469 * pcb_vm86[0] = saved TSS descriptor, word 0
470 * pcb_vm86[1] = saved TSS descriptor, word 1
471 */
472 #define new_ptd pcb_esi
473 #define vm86_frame pcb_ebp
474 #define pgtable_va pcb_ebx
475
476 pcb = &vml->vml_pcb;
477 ext = &vml->vml_ext;
478
479 mtx_init(&vm86_lock, "vm86 lock", NULL, MTX_DEF);
480
481 bzero(pcb, sizeof(struct pcb));
482 pcb->new_ptd = vm86pa | PG_V | PG_RW | PG_U;
483 pcb->vm86_frame = vm86paddr - sizeof(struct vm86frame);
484 pcb->pgtable_va = vm86paddr;
485 pcb->pcb_flags = PCB_VM86CALL;
486 pcb->pcb_ext = ext;
487
488 bzero(ext, sizeof(struct pcb_ext));
489 ext->ext_tss.tss_esp0 = vm86paddr;
490 ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
491 ext->ext_tss.tss_ioopt =
492 ((u_int)vml->vml_iomap - (u_int)&ext->ext_tss) << 16;
493 ext->ext_iomap = vml->vml_iomap;
494 ext->ext_vm86.vm86_intmap = vml->vml_intmap;
495
496 if (cpu_feature & CPUID_VME)
497 ext->ext_vm86.vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);
498
499 addr = (u_int *)ext->ext_vm86.vm86_intmap;
500 for (i = 0; i < (INTMAP_SIZE + IOMAP_SIZE) / sizeof(u_int); i++)
501 *addr++ = 0;
502 vml->vml_iomap_trailer = 0xff;
503
504 ssd.ssd_base = (u_int)&ext->ext_tss;
505 ssd.ssd_limit = TSS_SIZE - 1;
506 ssdtosd(&ssd, &ext->ext_tssd);
507
508 vm86pcb = pcb;
509
510 #if 0
511 /*
512 * use whatever is leftover of the vm86 page layout as a
513 * message buffer so we can capture early output.
514 */
515 msgbufinit((vm_offset_t)vm86paddr + sizeof(struct vm86_layout),
516 ctob(3) - sizeof(struct vm86_layout));
517 #endif
518 }
519
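/*
 * A struct vm86context records which kernel pages are temporarily mapped
 * into the low megabyte for data transfer with BIOS code.  vm86_getpage()
 * looks up an existing mapping for a given vm86 page number, and
 * vm86_addpage() registers a new one, allocating a scratch page when the
 * caller does not supply one.
 */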
520 vm_offset_t
521 vm86_getpage(struct vm86context *vmc, int pagenum)
522 {
523 int i;
524
525 for (i = 0; i < vmc->npages; i++)
526 if (vmc->pmap[i].pte_num == pagenum)
527 return (vmc->pmap[i].kva);
528 return (0);
529 }
530
531 vm_offset_t
532 vm86_addpage(struct vm86context *vmc, int pagenum, vm_offset_t kva)
533 {
534 int i, flags = 0;
535
536 for (i = 0; i < vmc->npages; i++)
537 if (vmc->pmap[i].pte_num == pagenum)
538 goto overlap;
539
540 if (vmc->npages == VM86_PMAPSIZE)
541 goto full; /* XXX grow map? */
542
543 if (kva == 0) {
544 kva = (vm_offset_t)malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
545 flags = VMAP_MALLOC;
546 }
547
548 i = vmc->npages++;
549 vmc->pmap[i].flags = flags;
550 vmc->pmap[i].kva = kva;
551 vmc->pmap[i].pte_num = pagenum;
552 return (kva);
553 overlap:
554 panic("vm86_addpage: overlap");
555 full:
556 panic("vm86_addpage: not enough room");
557 }
558
559 /*
560 * called from vm86_bioscall, while in vm86 address space, to finalize setup.
561 */
562 void
563 vm86_prepcall(struct vm86frame *vmf)
564 {
565 struct vm86_kernel *vm86;
566 uint32_t *stack;
567 uint8_t *code;
568
569 code = (void *)0xa00;
570 stack = (void *)(0x1000 - 2); /* keep aligned */
571 if ((vmf->vmf_trapno & PAGE_MASK) <= 0xff) {
572 /* interrupt call requested */
573 code[0] = INTn;
574 code[1] = vmf->vmf_trapno & 0xff;
575 code[2] = HLT;
576 vmf->vmf_ip = (uintptr_t)code;
577 vmf->vmf_cs = 0;
578 } else {
579 code[0] = HLT;
580 stack--;
581 stack[0] = MAKE_VEC(0, (uintptr_t)code);
582 }
583 vmf->vmf_sp = (uintptr_t)stack;
584 vmf->vmf_ss = 0;
585 vmf->kernel_fs = vmf->kernel_es = vmf->kernel_ds = 0;
586 vmf->vmf_eflags = PSL_VIF | PSL_VM | PSL_USER;
587
588 vm86 = &curpcb->pcb_ext->ext_vm86;
589 if (!vm86->vm86_has_vme)
590 vm86->vm86_eflags = vmf->vmf_eflags; /* save VIF, VIP */
591 }
592
593 /*
594 * vm86 trap handler; determines whether routine succeeded or not.
595 * Called while in vm86 space, returns to calling process.
596 */
597 void
598 vm86_trap(struct vm86frame *vmf)
599 {
600 void (*p)(struct vm86frame *);
601 caddr_t addr;
602
603 /* "should not happen" */
604 if ((vmf->vmf_eflags & PSL_VM) == 0)
605 panic("vm86_trap called, but not in vm86 mode");
606
607 addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
608 if (*(u_char *)addr == HLT)
609 vmf->vmf_trapno = vmf->vmf_eflags & PSL_C;
610 else
611 vmf->vmf_trapno = vmf->vmf_trapno << 16;
612
613 p = (void (*)(struct vm86frame *))((uintptr_t)vm86_biosret +
614 setidt_disp);
615 p(vmf);
616 }
617
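/*
 * Execute software interrupt intnum in vm86 mode via the BIOS-call
 * trampoline; input registers are taken from, and results returned in,
 * the caller-supplied frame.  A minimal usage sketch (not taken from this
 * file; the register accessor name vmf_eax is assumed to be provided by
 * <machine/vm86.h>):
 *
 *	struct vm86frame vmf;
 *
 *	bzero(&vmf, sizeof(vmf));
 *	vmf.vmf_eax = 0x0f00;			AH = 0Fh, "get video mode"
 *	if (vm86_intcall(0x10, &vmf) == 0)
 *		mode = vmf.vmf_eax & 0xff;	results come back in vmf
 */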
618 int
619 vm86_intcall(int intnum, struct vm86frame *vmf)
620 {
621 int (*p)(struct vm86frame *);
622 int retval;
623
624 if (intnum < 0 || intnum > 0xff)
625 return (EINVAL);
626
627 vmf->vmf_trapno = intnum;
628 p = (int (*)(struct vm86frame *))((uintptr_t)vm86_bioscall +
629 setidt_disp);
630 mtx_lock(&vm86_lock);
631 critical_enter();
632 retval = p(vmf);
633 critical_exit();
634 mtx_unlock(&vm86_lock);
635 return (retval);
636 }
637
638 /*
639 * struct vm86context contains the page table to use when making
640 * vm86 calls. If intnum is a valid interrupt number (0-255), then
641 * the "interrupt trampoline" will be used, otherwise we use the
642 * caller's cs:ip routine.
643 */
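/*
 * Typical use (a sketch, not from this file): register a scratch buffer at
 * a chosen vm86 page, point a segment:offset at it with vm86_getptr(), and
 * make the call; the BIOS reads or writes the buffer through the temporary
 * mapping set up below.
 *
 *	struct vm86context vmc;
 *	struct vm86frame vmf;
 *	u_short seg, off;
 *	void *buf;
 *
 *	bzero(&vmc, sizeof(vmc));
 *	bzero(&vmf, sizeof(vmf));
 *	buf = (void *)vm86_addpage(&vmc, 1, 0);	scratch page at 0x1000
 *	vm86_getptr(&vmc, (vm_offset_t)buf, &seg, &off);
 *	... load BIOS input registers, e.g. a buffer pointer of seg:off ...
 *	vm86_datacall(0x10, &vmf, &vmc);
 */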
644 int
645 vm86_datacall(int intnum, struct vm86frame *vmf, struct vm86context *vmc)
646 {
647 pt_entry_t *pte;
648 int (*p)(struct vm86frame *);
649 vm_paddr_t page;
650 int i, entry, retval;
651
652 pte = (pt_entry_t *)vm86paddr;
653 mtx_lock(&vm86_lock);
654 for (i = 0; i < vmc->npages; i++) {
655 page = vtophys(vmc->pmap[i].kva & PG_FRAME);
656 entry = vmc->pmap[i].pte_num;
657 vmc->pmap[i].old_pte = pte[entry];
658 pte[entry] = page | PG_V | PG_RW | PG_U;
659 pmap_invalidate_page(kernel_pmap, vmc->pmap[i].kva);
660 }
661
662 vmf->vmf_trapno = intnum;
663 p = (int (*)(struct vm86frame *))((uintptr_t)vm86_bioscall +
664 setidt_disp);
665 critical_enter();
666 retval = p(vmf);
667 critical_exit();
668
669 for (i = 0; i < vmc->npages; i++) {
670 entry = vmc->pmap[i].pte_num;
671 pte[entry] = vmc->pmap[i].old_pte;
672 pmap_invalidate_page(kernel_pmap, vmc->pmap[i].kva);
673 }
674 mtx_unlock(&vm86_lock);
675
676 return (retval);
677 }
678
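/*
 * Translate a vm86 segment:offset into the kernel virtual address of the
 * page registered in the context, or return 0 if the page is not mapped.
 */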
679 vm_offset_t
680 vm86_getaddr(struct vm86context *vmc, u_short sel, u_short off)
681 {
682 int i, page;
683 vm_offset_t addr;
684
685 addr = (vm_offset_t)MAKE_ADDR(sel, off);
686 page = addr >> PAGE_SHIFT;
687 for (i = 0; i < vmc->npages; i++)
688 if (page == vmc->pmap[i].pte_num)
689 return (vmc->pmap[i].kva + (addr & PAGE_MASK));
690 return (0);
691 }
692
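/*
 * Reverse translation: find the vm86 segment:offset corresponding to a
 * kernel virtual address inside one of the context's pages.  Returns 1 on
 * success and 0 if the address is not covered by the context.
 */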
693 int
694 vm86_getptr(struct vm86context *vmc, vm_offset_t kva, u_short *sel,
695 u_short *off)
696 {
697 int i;
698
699 for (i = 0; i < vmc->npages; i++)
700 if (kva >= vmc->pmap[i].kva &&
701 kva < vmc->pmap[i].kva + PAGE_SIZE) {
702 *off = kva - vmc->pmap[i].kva;
703 *sel = vmc->pmap[i].pte_num << 8;
704 return (1);
705 }
706 return (0);
707 }
708
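/*
 * Handler for the i386_vm86() sysarch(2) sub-operations: initialize the
 * per-process vm86 state (VM86_INIT), report whether VME is enabled
 * (VM86_GET_VME), and let a sufficiently privileged process issue a BIOS
 * interrupt call (VM86_INTCALL).
 */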
709 int
710 vm86_sysarch(struct thread *td, char *args)
711 {
712 int error = 0;
713 struct i386_vm86_args ua;
714 struct vm86_kernel *vm86;
715
716 if ((error = copyin(args, &ua, sizeof(struct i386_vm86_args))) != 0)
717 return (error);
718
719 if (td->td_pcb->pcb_ext == 0)
720 if ((error = i386_extend_pcb(td)) != 0)
721 return (error);
722 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
723
724 switch (ua.sub_op) {
725 case VM86_INIT: {
726 struct vm86_init_args sa;
727
728 if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
729 return (error);
730 if (cpu_feature & CPUID_VME)
731 vm86->vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);
732 else
733 vm86->vm86_has_vme = 0;
734 vm86->vm86_inited = 1;
735 vm86->vm86_debug = sa.debug;
736 bcopy(&sa.int_map, vm86->vm86_intmap, 32);
737 }
738 break;
739
740 #if 0
741 case VM86_SET_VME: {
742 struct vm86_vme_args sa;
743
744 if ((cpu_feature & CPUID_VME) == 0)
745 return (ENODEV);
746
747 if (error = copyin(ua.sub_args, &sa, sizeof(sa)))
748 return (error);
749 if (sa.state)
750 load_cr4(rcr4() | CR4_VME);
751 else
752 load_cr4(rcr4() & ~CR4_VME);
753 }
754 break;
755 #endif
756
757 case VM86_GET_VME: {
758 struct vm86_vme_args sa;
759
760 sa.state = (rcr4() & CR4_VME ? 1 : 0);
761 error = copyout(&sa, ua.sub_args, sizeof(sa));
762 }
763 break;
764
765 case VM86_INTCALL: {
766 struct vm86_intcall_args sa;
767
768 if ((error = priv_check(td, PRIV_VM86_INTCALL)))
769 return (error);
770 if ((error = copyin(ua.sub_args, &sa, sizeof(sa))))
771 return (error);
772 if ((error = vm86_intcall(sa.intnum, &sa.vmf)))
773 return (error);
774 error = copyout(&sa, ua.sub_args, sizeof(sa));
775 }
776 break;
777
778 default:
779 error = EINVAL;
780 }
781 return (error);
782 }