FreeBSD/Linux Kernel Cross Reference
sys/i386/trap.c
1 /*
2 * Mach Operating System
3 * Copyright (c) 1993-1988 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: trap.c,v $
29 * Revision 2.24 93/11/17 16:40:32 dbg
30 * Made interrupted_pc a macro in machine/thread.h.
31 * [93/10/14 dbg]
32 *
33 * Declare non-returning functions as returning 'no_return'.
34 * Remove include of kern/sched.h. Include vm/vm_fault.h for
35 * routine definitions. Added ANSI function prototypes.
36 * [93/05/21 dbg]
37 *
38 * Revision 2.23 93/05/15 19:30:47 mrt
39 * machparam.h -> machspl.h
40 *
41 * Revision 2.22 93/05/10 23:23:53 rvb
42 * Added TTD teledebug code to handle traps. Code mirrors ddb.
43 * [93/03/01 grm]
44 *
45 * Revision 2.21 93/01/24 13:14:55 danner
46 * Installed pc sampling from C Maeda; added interrupted_pc().
47 * [93/01/12 rvb]
48 *
49 * Revision 2.20 93/01/14 17:29:45 danner
50 * Proper spl typing.
51 * [92/11/30 af]
52 *
53 * Revision 2.19 92/01/03 20:09:29 dbg
54 * Build retry table for certain successful faults.
55 * Enable IO instruction emulation in V86 mode.
56 * [91/12/01 dbg]
57 *
58 * Add i386_astintr to handle delayed floating-point exceptions.
59 * [91/10/29 dbg]
60 *
61 * Check for use of user FP register segment if floating-point
62 * emulator present. Pass i386 trap number as exception code
63 * for all i386 exceptions. Route i386 exceptions through
64 * emulator fixup routine if exception taken within emulator.
65 *
66 * Eliminate warning on 'ifdef'. Remove offending type
67 * declarations.
68 * [91/10/19 dbg]
69 *
70 * Revision 2.18 91/10/09 16:07:23 af
71 * Checked kdb trap for user space T_DEBUG and T_INT3.
72 * [91/08/29 tak]
73 *
74 * Revision 2.17 91/08/28 21:37:16 jsb
75 * Don't emulate IO instructions if in V86 mode.
76 * [91/08/21 dbg]
77 *
78 * Revision 2.16 91/08/24 11:57:09 af
79 * Revision 2.15.3.1 91/08/19 13:45:20 danner
80 * Make the file safe for gcc 1.36. There is a really bizarro
81 * structure assignment of an array that starts at zero that
82 * nukes us.
83 * [91/08/07 rvb]
84 *
85 * Revision 2.15.3.1 91/08/19 13:45:20 danner
86 * Make the file safe for gcc 1.36. There is a really bizarro
87 * structure assignment of an array that starts at zero that
88 * nukes us.
89 * [91/08/07 rvb]
90 *
91 * Revision 2.15 91/07/31 17:42:21 dbg
92 * Separate user and kernel trap cases. Combine user and v86-mode
93 * trap cases (except for calling instruction assist).
94 *
95 * New v86 interrupt simulation.
96 *
97 * Check for two copyout failure locations.
98 * [91/07/30 17:01:10 dbg]
99 *
100 * Revision 2.14 91/06/06 17:04:06 jsb
101 * i386_read_fault is now intel_read_fault.
102 * [91/05/13 16:56:39 jsb]
103 *
104 * Revision 2.13 91/05/14 16:18:11 mrt
105 * Correcting copyright
106 *
107 * Revision 2.12 91/05/08 12:43:35 dbg
108 * Correct calls to FPU error routines.
109 * [91/04/26 14:39:33 dbg]
110 *
111 * Revision 2.11 91/03/16 14:45:28 rpd
112 * Added resume, continuation arguments to vm_fault.
113 * Added user_page_fault_continue.
114 * [91/02/05 rpd]
115 * Removed astintr.
116 * [91/01/22 15:53:33 rpd]
117 *
118 * Revision 2.10 91/02/14 14:41:59 mrt
119 * rfr's latest changes to v86 assist
120 * [91/01/28 15:25:30 rvb]
121 *
122 * Revision 2.9 91/02/05 17:15:21 mrt
123 * Changed to new Mach copyright
124 * [91/02/01 17:38:41 mrt]
125 *
126 * Revision 2.8 91/01/09 22:41:55 rpd
127 * Fixed a merge bug.
128 * [91/01/09 rpd]
129 *
130 * Revision 2.7 91/01/08 17:32:21 rpd
131 * Add v86_hdw_assist().
132 * [91/01/04 09:54:24 rvb]
133 *
134 * Basically add trapv86()
135 * [90/12/20 10:21:01 rvb]
136 *
137 * Revision 2.6 91/01/08 15:11:18 rpd
138 * Only need csw_needed in AST exit path.
139 * [90/12/27 rpd]
140 *
141 * Replaced thread_doexception with new exception interface.
142 * [90/12/21 rpd]
143 * Added continuation argument to thread_block.
144 * [90/12/08 rpd]
145 *
146 * Revision 2.5 90/10/25 14:44:56 rwd
147 * Added watchpoint support.
148 * [90/10/18 rpd]
149 *
150 * Revision 2.4 90/06/02 14:48:58 rpd
151 * Updated to new csw_needed macro.
152 * [90/06/02 rpd]
153 *
154 * Revision 2.3 90/05/21 13:26:49 dbg
155 * Add hook for emulating IO instructions.
156 * [90/05/17 dbg]
157 *
158 * Revision 2.2 90/05/03 15:38:07 dbg
159 * V86 mode is also user mode.
160 * [90/04/26 dbg]
161 *
162 * Created (from VAX version).
163 * [90/02/08 dbg]
164 *
165 */
166 /*
167 * Hardware trap/fault handler.
168 */
169
170 #include <cpus.h>
171 #include <fpe.h>
172 #include <mach_kdb.h>
173 #include <mach_ttd.h>
174
175 #include <sys/types.h>
176 #include <i386/eflags.h>
177 #include <i386/trap.h>
178 #include <machine/machspl.h> /* for spl_t */
179
180 #include <mach/exception.h>
181 #include <mach/kern_return.h>
182 #include <mach/vm_param.h>
183 #include <mach/i386/thread_status.h>
184
185 #include <vm/vm_kern.h>
186 #include <vm/vm_map.h>
187 #include <vm/vm_fault.h>
188
189 #include <kern/ast.h>
190 #include <kern/exception.h>
191 #include <kern/memory.h>
192 #include <kern/sched_prim.h>
193 #include <kern/task.h>
194 #include <kern/thread.h>
195
196 #include <i386/io_emulate.h>
197
198 #if MACH_KDB
199 #include <ddb/db_break.h>
200 #include <ddb/db_run.h>
201 #include <ddb/db_watch.h>
202 #endif
203
204 no_return i386_exception( /* forward */
205 int exc,
206 int code,
207 int subcode);
208
209 #if FPE
210 extern no_return fpe_exception_fixup(
211 int _exception,
212 int code,
213 int subcode);
214 #endif /* FPE */
215
216 boolean_t v86_assist(
217 thread_t thread,
218 struct i386_saved_state *regs);
219
220 boolean_t check_io_fault(
221 struct i386_saved_state *regs);
222
223 #if MACH_KDB
boolean_t	debug_all_traps_with_kdb = FALSE;	/* route every user trap into kdb */

/*
 * Re-enter the kernel debugger for the current thread using its
 * saved user-mode register state (trap number and error code were
 * stashed in the saved state by the trap handler).  If the debugger
 * handles the trap, resume user execution directly without
 * returning to the caller.
 */
void
thread_kdb_return(void)
{
	register thread_t thread = current_thread();
	register struct i386_saved_state *regs = USER_REGS(thread);

	if (kdb_trap(regs->trapno, regs->err, regs)) {
		thread_exception_return();
		/*NOTREACHED*/
	}
}
237 #endif /* MACH_KDB */
238
239 #if MACH_TTD
240 extern boolean_t kttd_enabled;
241 boolean_t debug_all_traps_with_kttd = TRUE;
242 #endif /* MACH_TTD */
243
/*
 * Continuation invoked when the vm_fault started for a user-mode
 * page fault completes (see the T_PAGE_FAULT case in user_trap).
 *
 * kr: result of the fault.  On KERN_SUCCESS, optionally report a
 * kernel-debugger watchpoint covering the faulting address, then
 * resume the user thread.  On failure, raise EXC_BAD_ACCESS with
 * the faulting address as subcode.  Never returns.
 */
no_return
user_page_fault_continue(
	kern_return_t	kr)
{
	register thread_t thread = current_thread();
	register struct i386_saved_state *regs = USER_REGS(thread);

	if (kr == KERN_SUCCESS) {
#if	MACH_KDB
		/* a successful write fault may have tripped a watchpoint */
		if (db_watchpoints_enabled() &&
		    (regs->err & T_PF_WRITE) &&
		    db_find_watchpoint(thread->task->map,
				       (vm_offset_t)regs->cr2,
				       regs))
			kdb_trap(T_WATCHPOINT, 0, regs);
#endif	/* MACH_KDB */
		thread_exception_return();
		/*NOTREACHED*/
	}

#if	MACH_KDB
	if (debug_all_traps_with_kdb &&
	    kdb_trap(regs->trapno, regs->err, regs)) {
		thread_exception_return();
		/*NOTREACHED*/
	}
#endif	/* MACH_KDB */

	/* cr2 holds the faulting linear address saved at trap time */
	i386_exception(EXC_BAD_ACCESS, kr, regs->cr2);
	/*NOTREACHED*/
}
275
276 /*
277 * Fault recovery in copyin/copyout routines.
278 */
struct recovery {
	int	fault_addr;	/* EIP at which the fault may occur */
	int	recover_addr;	/* EIP to resume at if it does */
};

/*
 * Fault-recovery table for copyin/copyout (entries are defined
 * in the assembly sources); consulted by kernel_trap when a
 * page fault cannot be resolved.
 */
extern struct recovery	recover_table[];
extern struct recovery	recover_table_end[];

/*
 * Recovery from Successful fault in copyout does not
 * return directly - it retries the pte check, since
 * the 386 ignores write protection in kernel mode.
 */
extern struct recovery	retry_table[];
extern struct recovery	retry_table_end[];
294
/* printable names for the i386 trap vectors, indexed by trap number */
char *	trap_type[] = {
	"Divide error",
	"Debug trap",
	"NMI",
	"Breakpoint",
	"Overflow",
	"Bounds check",
	"Invalid opcode",
	"No coprocessor",
	"Double fault",
	"Coprocessor overrun",
	"Invalid TSS",
	"Segment not present",
	"Stack bounds",
	"General protection",
	"Page fault",
	"(reserved)",
	"Coprocessor error"
};
int	TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);

/* XXX not referenced in this file - presumably a debug toggle; confirm before removing */
boolean_t	brb = TRUE;
317
318 /*
319 * Trap from kernel mode. Only page-fault errors are recoverable,
320 * and then only in special circumstances. All other errors are
321 * fatal.
322 */
void kernel_trap(
	register struct i386_saved_state *regs)
{
	int		code;		/* hardware error code */
	int		subcode;	/* faulting address for page faults */
	register int	type;		/* trap vector number */
	vm_map_t	map;
	kern_return_t	result;
	register thread_t thread;

	type = regs->trapno;
	code = regs->err;
	thread = current_thread();

	switch (type) {
	    case T_NO_FPU:
		/* device-not-available: give this thread the FPU */
		fpnoextflt();
		return;

	    case T_FPU_FAULT:
		/* coprocessor segment overrun */
		fpextovrflt();
		return;

	    case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return;

	    case T_PAGE_FAULT:
		/*
		 * If the current map is a submap of the kernel map,
		 * and the address is within that map, fault on that
		 * map.  If the same check is done in vm_fault
		 * (vm_map_lookup), we may deadlock on the kernel map
		 * lock.
		 */
		subcode = regs->cr2;	/* get faulting address */

		if (thread == THREAD_NULL)
		    map = kernel_map;
		else {
		    map = thread->task->map;
		    if ((vm_offset_t)subcode < vm_map_min(map) ||
			(vm_offset_t)subcode >= vm_map_max(map))
			map = kernel_map;
		}

		/*
		 * Since the 386 ignores write protection in
		 * kernel mode, always try for write permission
		 * first.  If that fails and the fault was a
		 * read fault, retry with read permission.
		 */
		result = vm_fault(map,
				  trunc_page((vm_offset_t)subcode),
				  VM_PROT_READ|VM_PROT_WRITE,
				  FALSE,
				  FALSE,
				  0);
#if	MACH_KDB
		if (result == KERN_SUCCESS) {
		    /* Look for watchpoints */
		    if (db_watchpoints_enabled() &&
			(code & T_PF_WRITE) &&
			db_find_watchpoint(map,
				(vm_offset_t)subcode, regs))
			kdb_trap(T_WATCHPOINT, 0, regs);
		}
		else
#endif	/* MACH_KDB */
		if ((code & T_PF_WRITE) == 0 &&
		    result == KERN_PROTECTION_FAILURE)
		{
		    /*
		     * Must expand vm_fault by hand,
		     * so that we can ask for read-only access
		     * but enter a (kernel)writable mapping.
		     */
		    result = intel_read_fault(map,
				trunc_page((vm_offset_t)subcode));
		}

		if (result == KERN_SUCCESS) {
		    /*
		     * Certain faults require that we back up
		     * the EIP.
		     */
		    register struct recovery *rp;

		    for (rp = retry_table; rp < retry_table_end; rp++) {
			if (regs->eip == rp->fault_addr) {
			    regs->eip = rp->recover_addr;
			    break;
			}
		    }
		    return;
		}

		/*
		 * If there is a failure recovery address
		 * for this fault, go there.
		 */
		{
		    register struct recovery *rp;

		    for (rp = recover_table;
			 rp < recover_table_end;
			 rp++) {
			if (regs->eip == rp->fault_addr) {
			    regs->eip = rp->recover_addr;
			    return;
			}
		    }
		}

		/*
		 * Check thread recovery address also -
		 * v86 assist uses it.
		 *
		 * NOTE(review): thread is dereferenced here without a
		 * THREAD_NULL check; presumably an unresolved kernel
		 * fault with no current thread cannot reach this point
		 * - confirm.
		 */
		if (thread->recover) {
		    regs->eip = thread->recover;
		    thread->recover = 0;
		    return;
		}

		/*
		 * Unanticipated page-fault errors in kernel
		 * should not happen.
		 */
		/* fall through */

	    default:
#if	MACH_TTD
		if (kttd_enabled && kttd_trap(type, code, regs))
		    return;
#endif	/* MACH_TTD */
#if	MACH_KDB
		if (kdb_trap(type, code, regs))
		    return;
#endif	/* MACH_KDB */
		panic("Trap: trap type %d, code = %x, pc = %x\n",
		      type, code, regs->eip);
		return;
	}
}
467
468
469 /*
470 * Trap from user mode.
471 */
void user_trap(
	register struct i386_saved_state *regs)
{
	int	exc;		/* Mach exception type */
	int	code;		/* machine-dependent exception code */
	int	subcode;	/* additional detail (address, selector) */
	register int	type;
	register thread_t thread = current_thread();

	if (regs->efl & EFL_VM) {
	    /*
	     * If hardware assist can handle exception,
	     * continue execution.
	     */
	    if (v86_assist(thread, regs))
		return;
	}

	type = regs->trapno;
	code = 0;
	subcode = 0;

	switch (type) {

	    case T_DIVIDE_ERROR:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_DIV;
		break;

	    case T_DEBUG:
#if	MACH_TTD
		if (kttd_enabled && kttd_in_single_step()) {
		    if (kttd_trap(type, regs->err, regs))
			return;
		}
#endif	/* MACH_TTD */
#if	MACH_KDB
		if (db_in_single_step()) {
		    if (kdb_trap(type, regs->err, regs))
			return;
		}
#endif	/* MACH_KDB */
		exc = EXC_BREAKPOINT;
		code = EXC_I386_SGL;
		break;

	    case T_INT3:
#if	MACH_TTD
		if (kttd_enabled && kttd_trap(type, regs->err, regs))
		    return;
		/*
		 * NOTE(review): with MACH_TTD this break leaves the
		 * switch before exc/code are assigned; if the
		 * debug_all_traps_with_kttd check below does not
		 * handle the trap, i386_exception is reached with
		 * exc uninitialized - confirm intended.
		 */
		break;
#endif	/* MACH_TTD */
#if	MACH_KDB
		/* eip - 1 is the address of the int3 opcode itself */
		if (db_find_breakpoint_here(
			(current_thread())? current_thread()->task: TASK_NULL,
			regs->eip - 1)) {
		    if (kdb_trap(type, regs->err, regs))
			return;
		}
#endif	/* MACH_KDB */
		exc = EXC_BREAKPOINT;
		code = EXC_I386_BPT;
		break;

	    case T_OVERFLOW:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_INTO;
		break;

	    case T_OUT_OF_BOUNDS:
		exc = EXC_SOFTWARE;
		code = EXC_I386_BOUND;
		break;

	    case T_INVALID_OPCODE:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOP;
		break;

	    case T_NO_FPU:
	    case 32:		/* XXX */
		fpnoextflt();
		return;

	    case T_FPU_FAULT:
		fpextovrflt();
		return;

	    case 10:		/* invalid TSS == iret with NT flag set */
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVTSSFLT;
		subcode = regs->err & 0xffff;	/* selector from error code */
		break;

	    case T_SEGMENT_NOT_PRESENT:
#if	FPE
		/* may be the FP emulator touching its register segment */
		if (fp_emul_error(regs))
		    return;
#endif	/* FPE */

		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_SEGNPFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_STACK_FAULT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_STKFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_GENERAL_PROTECTION:
		/*
		 * Try IO-instruction emulation only in protected mode;
		 * in V86 mode (v86_assist declined above) skip it.
		 */
		if (!(regs->efl & EFL_VM)) {
		    if (check_io_fault(regs))
			return;
		}
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_GPFLT;
		subcode = regs->err & 0xffff;
		break;

	    case T_PAGE_FAULT:
		/*
		 * Start the fault; completion (success or failure) is
		 * handled in the user_page_fault_continue continuation.
		 */
		subcode = regs->cr2;
		(void) vm_fault(thread->task->map,
				trunc_page((vm_offset_t)subcode),
				(regs->err & T_PF_WRITE)
				  ? VM_PROT_READ|VM_PROT_WRITE
				  : VM_PROT_READ,
				FALSE,
				FALSE,
				user_page_fault_continue);
		/*NOTREACHED*/
		break;

	    case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return;

	    default:
#if	MACH_TTD
		if (kttd_enabled && kttd_trap(type, regs->err, regs))
		    return;
#endif	/* MACH_TTD */
#if	MACH_KDB
		if (kdb_trap(type, regs->err, regs))
		    return;
#endif	/* MACH_KDB */
		panic("Trap: trap type %d, code = %x, pc = %x\n",
		      type, regs->err, regs->eip);
		return;
	}

#if	MACH_TTD
	if (debug_all_traps_with_kttd && kttd_trap(type, regs->err, regs))
	    return;
#endif	/* MACH_TTD */
#if	MACH_KDB
	if (debug_all_traps_with_kdb &&
	    kdb_trap(type, regs->err, regs))
	    return;
#endif	/* MACH_KDB */

	i386_exception(exc, code, subcode);
	/*NOTREACHED*/
}
637
638 /*
639 * V86 mode assist for interrupt handling.
640 */
boolean_t	v86_assist_on = TRUE;		/* master switch for the v86 assist */
boolean_t	v86_unsafe_ok = FALSE;		/* XXX unused in this file - confirm purpose */
boolean_t	v86_do_sti_cli = TRUE;		/* emulate cli/sti instructions */
boolean_t	v86_do_sti_immediate = FALSE;	/* sti takes effect at once, not after next instr */

/*
 * Private flag kept in v86->flags: a simulated interrupt frame is
 * on the user stack, so further interrupt delivery is held off
 * until the corresponding iret (or popf).
 */
#define	V86_IRET_PENDING 0x4000

int cli_count = 0;	/* statistics: emulated cli instructions */
int sti_count = 0;	/* statistics: emulated sti instructions */
650
/*
 * Emulate the privileged 8086 instructions (IO, cli/sti,
 * pushf/popf, iret) that fault in virtual-8086 mode, and deliver
 * simulated interrupts to the v86 task.  Returns TRUE if the trap
 * was fully handled and the thread may resume, FALSE if the trap
 * must be processed as a normal user exception.
 */
boolean_t
v86_assist(
	thread_t thread,
	register struct i386_saved_state *regs)
{
	register struct v86_assist_state *v86 = &thread->pcb->ims.v86s;

/*
 * Build an 8086 address. Use only when off is known to be 16 bits.
 */
#define	Addr8086(seg,off)	((((seg) & 0xffff) << 4) + (off))

/* user-controllable EFLAGS bits; IF and TF are virtualized in v86->flags */
#define	EFL_V86_SAFE		(  EFL_OF | EFL_DF | EFL_TF \
				 | EFL_SF | EFL_ZF | EFL_AF \
				 | EFL_PF | EFL_CF )

	/* 32-bit iret/interrupt stack frame */
	struct iret_32 {
		int		eip;
		int		cs;
		int		eflags;
	};
	/* 16-bit iret/interrupt stack frame */
	struct iret_16 {
		unsigned short	ip;
		unsigned short	cs;
		unsigned short	flags;
	};
	union iret_struct {
		struct iret_32	iret_32;
		struct iret_16	iret_16;
	};

	/* real-mode interrupt-vector-table entry */
	struct int_vec {
		unsigned short	ip;
		unsigned short	cs;
	};

	if (!v86_assist_on)
	    return FALSE;

	/*
	 * If delayed STI pending, enable interrupts.
	 * Turn off tracing if on only to delay STI.
	 */
	if (v86->flags & V86_IF_PENDING) {
	    v86->flags &= ~V86_IF_PENDING;
	    v86->flags |= EFL_IF;
	    if ((v86->flags & EFL_TF) == 0)
		regs->efl &= ~EFL_TF;
	}

	if (regs->trapno == T_DEBUG) {

	    if (v86->flags & EFL_TF) {
		/*
		 * Trace flag was also set - it has priority
		 */
		return FALSE;			/* handle as single-step */
	    }
	    /*
	     * Fall through to check for interrupts.
	     */
	}
	else if (regs->trapno == T_GENERAL_PROTECTION) {
	    /*
	     * General protection error - must be an 8086 instruction
	     * to emulate.
	     */
	    register int	eip;
	    boolean_t	addr_32 = FALSE;	/* 0x67 prefix seen */
	    boolean_t	data_32 = FALSE;	/* 0x66 prefix seen */
	    int		io_port;

	    /*
	     * Set up error handler for bad instruction/data
	     * fetches (page faults while decoding resume at the
	     * addr_error label below, via kernel_trap).
	     */
	    asm("movl $(addr_error), %0" : "=m" (thread->recover));

	    eip = regs->eip;
	    while (TRUE) {
		unsigned char	opcode;

		if (eip > 0xFFFF) {
		    thread->recover = 0;
		    return FALSE;	/* GP fault: IP out of range */
		}

		opcode = *(unsigned char *)Addr8086(regs->cs,eip);
		eip++;
		switch (opcode) {
		    case 0xf0:		/* lock */
		    case 0xf2:		/* repne */
		    case 0xf3:		/* repe */
		    case 0x2e:		/* cs */
		    case 0x36:		/* ss */
		    case 0x3e:		/* ds */
		    case 0x26:		/* es */
		    case 0x64:		/* fs */
		    case 0x65:		/* gs */
			/* ignore prefix */
			continue;

		    case 0x66:		/* data size */
			data_32 = TRUE;
			continue;

		    case 0x67:		/* address size */
			addr_32 = TRUE;
			continue;

		    case 0xe4:		/* inb imm */
		    case 0xe5:		/* inw imm */
		    case 0xe6:		/* outb imm */
		    case 0xe7:		/* outw imm */
			/* port number is the immediate byte */
			io_port = *(unsigned char *)Addr8086(regs->cs, eip);
			eip++;
			goto do_in_out;

		    case 0xec:		/* inb dx */
		    case 0xed:		/* inw dx */
		    case 0xee:		/* outb dx */
		    case 0xef:		/* outw dx */
		    case 0x6c:		/* insb */
		    case 0x6d:		/* insw */
		    case 0x6e:		/* outsb */
		    case 0x6f:		/* outsw */
			io_port = regs->edx & 0xffff;

		    do_in_out:
			if (!data_32)
			    opcode |= 0x6600;	/* word IO */

			switch (emulate_io(regs, opcode, io_port)) {
			    case EM_IO_DONE:
				/* instruction executed */
				break;
			    case EM_IO_RETRY:
				/* port mapped, retry instruction */
				thread->recover = 0;
				return TRUE;
			    case EM_IO_ERROR:
				/* port not mapped */
				thread->recover = 0;
				return FALSE;
			}
			break;

		    case 0xfa:		/* cli */
			if (!v86_do_sti_cli) {
			    thread->recover = 0;
			    return FALSE;
			}

			v86->flags &= ~EFL_IF;
					/* disable simulated interrupts */
			cli_count++;
			break;

		    case 0xfb:		/* sti */
			if (!v86_do_sti_cli) {
			    thread->recover = 0;
			    return FALSE;
			}

			if ((v86->flags & EFL_IF) == 0) {
			    if (v86_do_sti_immediate) {
				    v86->flags |= EFL_IF;
			    } else {
				    /* defer IF until after the next
				       instruction, via single-step */
				    v86->flags |= V86_IF_PENDING;
				    regs->efl |= EFL_TF;
			    }
				    /* single step to set IF next inst. */
			}
			sti_count++;
			break;

		    case 0x9c:		/* pushf */
		    {
			int	flags;
			vm_offset_t sp;
			int	size;

			/* merge virtualized IF/TF into the pushed image */
			flags = regs->efl;
			if ((v86->flags & EFL_IF) == 0)
			    flags &= ~EFL_IF;

			if ((v86->flags & EFL_TF) == 0)
			    flags &= ~EFL_TF;
			else flags |= EFL_TF;

			sp = regs->uesp;
			if (!addr_32)
			    sp &= 0xffff;
			else if (sp > 0xffff)
			    goto stack_error;
			size = (data_32) ? 4 : 2;
			if (sp < size)
			    goto stack_error;
			sp -= size;
			if (copyout(&flags,
				    (unsigned short *)Addr8086(regs->ss,sp),
				    size))
			    goto addr_error;
			if (addr_32)
			    regs->uesp = sp;
			else
			    regs->uesp = (regs->uesp & 0xffff0000) | sp;
			break;
		    }

		    case 0x9d:		/* popf */
		    {
			vm_offset_t sp;
			int	nflags;

			sp = regs->uesp;
			if (!addr_32)
			    sp &= 0xffff;
			else if (sp > 0xffff)
			    goto stack_error;

			if (data_32) {
			    if (sp > 0xffff - sizeof(int))
				goto stack_error;
			    nflags = *(int *)Addr8086(regs->ss,sp);
			    sp += sizeof(int);
			}
			else {
			    if (sp > 0xffff - sizeof(short))
				goto stack_error;
			    nflags = *(unsigned short *)
					Addr8086(regs->ss,sp);
			    sp += sizeof(short);
			}
			if (addr_32)
			    regs->uesp = sp;
			else
			    regs->uesp = (regs->uesp & 0xffff0000) | sp;

			/* virtualize IF/TF; preserve pending-iret marker */
			if (v86->flags & V86_IRET_PENDING) {
			    v86->flags = nflags & (EFL_TF | EFL_IF);
			    v86->flags |= V86_IRET_PENDING;
			} else {
			    v86->flags = nflags & (EFL_TF | EFL_IF);
			}
			/* only safe (arithmetic) bits reach real EFLAGS */
			regs->efl = (regs->efl & ~EFL_V86_SAFE)
				    | (nflags & EFL_V86_SAFE);
			break;
		    }
		    case 0xcf:		/* iret */
		    {
			vm_offset_t sp;
			int	nflags;
			union iret_struct iret_struct;

			v86->flags &= ~V86_IRET_PENDING;
			sp = regs->uesp;
			if (!addr_32)
			    sp &= 0xffff;
			else if (sp > 0xffff)
			    goto stack_error;

			if (data_32) {
			    if (sp > 0xffff - sizeof(struct iret_32))
				goto stack_error;
			    iret_struct.iret_32 =
				*(struct iret_32 *) Addr8086(regs->ss,sp);
			    sp += sizeof(struct iret_32);
			}
			else {
			    if (sp > 0xffff - sizeof(struct iret_16))
				goto stack_error;
			    iret_struct.iret_16 =
				*(struct iret_16 *) Addr8086(regs->ss,sp);
			    sp += sizeof(struct iret_16);
			}
			if (addr_32)
			    regs->uesp = sp;
			else
			    regs->uesp = (regs->uesp & 0xffff0000) | sp;

			if (data_32) {
			    eip	     = iret_struct.iret_32.eip;
			    regs->cs = iret_struct.iret_32.cs & 0xffff;
			    nflags   = iret_struct.iret_32.eflags;
			}
			else {
			    eip      = iret_struct.iret_16.ip;
			    regs->cs = iret_struct.iret_16.cs;
			    nflags   = iret_struct.iret_16.flags;
			}

			v86->flags = nflags & (EFL_TF | EFL_IF);
			regs->efl = (regs->efl & ~EFL_V86_SAFE)
				    | (nflags & EFL_V86_SAFE);
			break;
		    }
		    default:
			/*
			 * Instruction not emulated here.
			 */
			thread->recover = 0;
			return FALSE;
		}
		break;	/* exit from 'while TRUE' */
	    }
	    regs->eip = (regs->eip & 0xffff0000) | eip;
	}
	else {
	    /*
	     * Not a trap we handle.
	     */
	    thread->recover = 0;
	    return FALSE;
	}

	/* deliver a pending simulated interrupt, if enabled */
	if ((v86->flags & EFL_IF) && ((v86->flags & V86_IRET_PENDING)==0)) {

	    struct v86_interrupt_table *int_table;
	    int int_count;
	    int vec;
	    int i;

	    int_table = (struct v86_interrupt_table *) v86->int_table;
	    int_count = v86->int_count;

	    vec = 0;
	    for (i = 0; i < int_count; int_table++, i++) {
		if (!int_table->mask && int_table->count > 0) {
		    int_table->count--;
		    vec = int_table->vec;
		    break;
		}
	    }
	    if (vec != 0) {
		/*
		 * Take this interrupt
		 */
		vm_offset_t	sp;
		struct iret_16	iret_16;
		struct int_vec	int_vec;

		sp = regs->uesp & 0xffff;
		if (sp < sizeof(struct iret_16))
		    goto stack_error;
		sp -= sizeof(struct iret_16);
		iret_16.ip = regs->eip;
		iret_16.cs = regs->cs;
		iret_16.flags = regs->efl & 0xFFFF;
		if ((v86->flags & EFL_TF) == 0)
		    iret_16.flags &= ~EFL_TF;
		else iret_16.flags |= EFL_TF;

		/* fetch the handler address from the real-mode IVT
		   at linear address 0 */
#ifdef	gcc_1_36_worked
		int_vec = ((struct int_vec *)0)[vec];
#else
		bcopy((struct int_vec *) (sizeof(struct int_vec) * vec),
		      &int_vec,
		      sizeof (struct int_vec));
#endif
		if (copyout(&iret_16,
			    (struct iret_16 *)Addr8086(regs->ss,sp),
			    sizeof(struct iret_16)))
		    goto addr_error;
		regs->uesp = (regs->uesp & 0xFFFF0000) | (sp & 0xffff);
		regs->eip = int_vec.ip;
		regs->cs  = int_vec.cs;
		regs->efl &= ~EFL_TF;
		v86->flags &= ~(EFL_IF | EFL_TF);
		v86->flags |= V86_IRET_PENDING;
	    }
	}

	thread->recover = 0;
	return TRUE;

	/*
	 * On address error, report a page fault.
	 * XXX report GP fault - we don`t save
	 * the faulting address.
	 */
    addr_error:
	asm("addr_error:;");
	thread->recover = 0;
	return FALSE;

	/*
	 * On stack address error, return stack fault (12).
	 */
    stack_error:
	thread->recover = 0;
	regs->trapno = T_STACK_FAULT;
	return FALSE;
}
1044
1045 /*
1046 * Handle AST traps for i386.
1047 * Check for delayed floating-point exception from
1048 * AT-bus machines.
1049 */
/*
 * AST trap entry point.  Distinguishes a delayed floating-point
 * exception (posted as AST_I386_FP by the FP interrupt handler
 * when the error occurred in kernel mode) from an ordinary AST.
 */
void
i386_astintr(void)
{
	int	mycpu = cpu_number();

	(void) splsched();	/* block interrupts to check reasons */
	if (need_ast[mycpu] & AST_I386_FP) {
	    /*
	     * AST was for delayed floating-point exception -
	     * FP interrupt occured while in kernel.
	     * Turn off this AST reason and handle the FPU error.
	     */
	    ast_off(mycpu, AST_I386_FP);
	    (void) spl0();	/* safe to re-enable interrupts now */

	    fpexterrflt();
	}
	else {
	    /*
	     * Not an FPU trap.  Handle the AST.
	     * Interrupts are still blocked.
	     */
	    ast_taken();
	}
}
1075
1076 /*
1077 * Handle exceptions for i386.
1078 *
1079 * If we are an AT bus machine, we must turn off the AST for a
1080 * delayed floating-point exception.
1081 *
1082 * If we are providing floating-point emulation, we may have
1083 * to retrieve the real register values from the floating point
1084 * emulator.
1085 */
/*
 * Raise a Mach exception for the current thread.  Cancels any
 * pending delayed-FPU AST first, then hands off either to the
 * floating-point emulator fixup (which may rewrite register
 * state) or directly to the generic exception path.  Never
 * returns.
 */
no_return
i386_exception(
	int	exc,
	int	code,
	int	subcode)
{
	spl_t	s;

	/*
	 * Turn off delayed FPU error handling.
	 */
	s = splsched();
	ast_off(cpu_number(), AST_I386_FP);
	splx(s);

#if	FPE
	fpe_exception_fixup(exc, code, subcode);
#else
	exception(exc, code, subcode);
#endif
	/*NOTREACHED*/
}
1108
1109 boolean_t
1110 check_io_fault(
1111 struct i386_saved_state *regs)
1112 {
1113 int eip, opcode, io_port;
1114 boolean_t data_16 = FALSE;
1115
1116 /*
1117 * Get the instruction.
1118 */
1119 eip = regs->eip;
1120
1121 for (;;) {
1122 opcode = inst_fetch(eip, regs->cs);
1123 eip++;
1124 switch (opcode) {
1125 case 0x66: /* data-size prefix */
1126 data_16 = TRUE;
1127 continue;
1128
1129 case 0xf3: /* rep prefix */
1130 case 0x26: /* es */
1131 case 0x2e: /* cs */
1132 case 0x36: /* ss */
1133 case 0x3e: /* ds */
1134 case 0x64: /* fs */
1135 case 0x65: /* gs */
1136 continue;
1137
1138 case 0xE4: /* inb imm */
1139 case 0xE5: /* inl imm */
1140 case 0xE6: /* outb imm */
1141 case 0xE7: /* outl imm */
1142 /* port is immediate byte */
1143 io_port = inst_fetch(eip, regs->cs);
1144 eip++;
1145 break;
1146
1147 case 0xEC: /* inb dx */
1148 case 0xED: /* inl dx */
1149 case 0xEE: /* outb dx */
1150 case 0xEF: /* outl dx */
1151 case 0x6C: /* insb */
1152 case 0x6D: /* insl */
1153 case 0x6E: /* outsb */
1154 case 0x6F: /* outsl */
1155 /* port is in DX register */
1156 io_port = regs->edx & 0xFFFF;
1157 break;
1158
1159 default:
1160 return FALSE;
1161 }
1162 break;
1163 }
1164
1165 if (data_16)
1166 opcode |= 0x6600; /* word IO */
1167
1168 switch (emulate_io(regs, opcode, io_port)) {
1169 case EM_IO_DONE:
1170 /* instruction executed */
1171 regs->eip = eip;
1172 return TRUE;
1173
1174 case EM_IO_RETRY:
1175 /* port mapped, retry instruction */
1176 return TRUE;
1177
1178 case EM_IO_ERROR:
1179 /* port not mapped */
1180 return FALSE;
1181 }
1182 }
1183
1184
Cache object: 2eb08c69dab9ba62e4789b32688e8dae
|