FreeBSD/Linux Kernel Cross Reference
sys/i386/trap.c


    1 /* 
    2  * Mach Operating System
    3  * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
    4  * All Rights Reserved.
    5  * 
    6  * Permission to use, copy, modify and distribute this software and its
    7  * documentation is hereby granted, provided that both the copyright
    8  * notice and this permission notice appear in all copies of the
    9  * software, derivative works or modified versions, and any portions
   10  * thereof, and that both notices appear in supporting documentation.
   11  * 
   12  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   13  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
   14  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   15  * 
   16  * Carnegie Mellon requests users of this software to return to
   17  * 
   18  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   19  *  School of Computer Science
   20  *  Carnegie Mellon University
   21  *  Pittsburgh PA 15213-3890
   22  * 
   23  * any improvements or extensions that they make and grant Carnegie Mellon
   24  * the rights to redistribute these changes.
   25  */
   26 /*
   27  * HISTORY
   28  * $Log:        trap.c,v $
   29  * Revision 2.23  93/05/15  19:30:47  mrt
   30  *      machparam.h -> machspl.h
   31  * 
   32  * Revision 2.22  93/05/10  23:23:53  rvb
   33  *      Checkin for MK80 branch.
   34  *      [93/05/10  15:12:36  grm]
   35  * 
   36  * Revision 2.21.1.1  93/03/01  15:24:27  grm
   37  *      Added TTD teledebug code to handle traps.  Code mirrors ddb.
   38  *      [93/03/01            grm]
   39  * 
   40  * Revision 2.21  93/01/24  13:14:55  danner
   41  *      Installed pc sampling from C Maeda; added interrupted_pc().
   42  *      [93/01/12            rvb]
   43  * 
   44  * Revision 2.20  93/01/14  17:29:45  danner
   45  *      Proper spl typing.
   46  *      [92/11/30            af]
   47  * 
   48  * Revision 2.19  92/01/03  20:09:29  dbg
   49  *      Build retry table for certain successful faults.
   50  *      Enable IO instruction emulation in V86 mode.
   51  *      [91/12/01            dbg]
   52  * 
   53  *      Add i386_astintr to handle delayed floating-point exceptions.
   54  *      [91/10/29            dbg]
   55  * 
   56  *      Check for use of user FP register segment if floating-point
   57  *      emulator present.  Pass i386 trap number as exception code
   58  *      for all i386 exceptions.  Route i386 exceptions through
   59  *      emulator fixup routine if exception taken within emulator.
   60  * 
   61  *      Eliminate warning on 'ifdef'.  Remove offending type
   62  *      declarations.
   63  *      [91/10/19            dbg]
   64  * 
   65  * Revision 2.18  91/10/09  16:07:23  af
   66  *      Checked kdb trap for user space T_DEBUG and T_INT3.
   67  *      [91/08/29            tak]
   68  * 
   69  * Revision 2.17  91/08/28  21:37:16  jsb
   70  *      Don't emulate IO instructions if in V86 mode.
   71  *      [91/08/21            dbg]
   72  * 
   73  * Revision 2.16  91/08/24  11:57:09  af
   74  *      Revision 2.15.3.1  91/08/19  13:45:20  danner
   75  *      Make the file safe for gcc 1.36.  There is a really bizarro
   76  *      structure assignment of an array that starts at zero that
   77  *      nukes us.
   78  *      [91/08/07            rvb]
   79  * 
   80  * Revision 2.15.3.1  91/08/19  13:45:20  danner
   81  *      Make the file safe for gcc 1.36.  There is a really bizarro
   82  *      structure assignment of an array that starts at zero that
   83  *      nukes us.
   84  *      [91/08/07            rvb]
   85  * 
   86  * Revision 2.15  91/07/31  17:42:21  dbg
   87  *      Separate user and kernel trap cases.  Combine user and v86-mode
   88  *      trap cases (except for calling instruction assist).
   89  * 
   90  *      New v86 interrupt simulation.
   91  * 
   92  *      Check for two copyout failure locations.
   93  *      [91/07/30  17:01:10  dbg]
   94  * 
   95  * Revision 2.14  91/06/06  17:04:06  jsb
   96  *      i386_read_fault is now intel_read_fault.
   97  *      [91/05/13  16:56:39  jsb]
   98  * 
   99  * Revision 2.13  91/05/14  16:18:11  mrt
  100  *      Correcting copyright
  101  * 
  102  * Revision 2.12  91/05/08  12:43:35  dbg
  103  *      Correct calls to FPU error routines.
  104  *      [91/04/26  14:39:33  dbg]
  105  * 
  106  * Revision 2.11  91/03/16  14:45:28  rpd
  107  *      Added resume, continuation arguments to vm_fault.
  108  *      Added user_page_fault_continue.
  109  *      [91/02/05            rpd]
  110  *      Removed astintr.
  111  *      [91/01/22  15:53:33  rpd]
  112  * 
  113  * Revision 2.10  91/02/14  14:41:59  mrt
  114  *      rfr's latest changes to v86 assist
  115  *      [91/01/28  15:25:30  rvb]
  116  * 
  117  * Revision 2.9  91/02/05  17:15:21  mrt
  118  *      Changed to new Mach copyright
  119  *      [91/02/01  17:38:41  mrt]
  120  * 
  121  * Revision 2.8  91/01/09  22:41:55  rpd
  122  *      Fixed a merge bug.
  123  *      [91/01/09            rpd]
  124  * 
  125  * Revision 2.7  91/01/08  17:32:21  rpd
  126  *      Add v86_hdw_assist().
  127  *      [91/01/04  09:54:24  rvb]
  128  * 
  129  *      Basically add trapv86()
  130  *      [90/12/20  10:21:01  rvb]
  131  * 
  132  * Revision 2.6  91/01/08  15:11:18  rpd
  133  *      Only need csw_needed in AST exit path.
  134  *      [90/12/27            rpd]
  135  * 
  136  *      Replaced thread_doexception with new exception interface.
  137  *      [90/12/21            rpd]
  138  *      Added continuation argument to thread_block.
  139  *      [90/12/08            rpd]
  140  * 
  141  * Revision 2.5  90/10/25  14:44:56  rwd
  142  *      Added watchpoint support.
  143  *      [90/10/18            rpd]
  144  * 
  145  * Revision 2.4  90/06/02  14:48:58  rpd
  146  *      Updated to new csw_needed macro.
  147  *      [90/06/02            rpd]
  148  * 
  149  * Revision 2.3  90/05/21  13:26:49  dbg
  150  *      Add hook for emulating IO instructions.
  151  *      [90/05/17            dbg]
  152  * 
  153  * Revision 2.2  90/05/03  15:38:07  dbg
  154  *      V86 mode is also user mode.
  155  *      [90/04/26            dbg]
  156  * 
  157  *      Created (from VAX version).
  158  *      [90/02/08            dbg]
  159  * 
  160  */
  161 /*
  162  * Hardware trap/fault handler.
  163  */
  164 
  165 #include <cpus.h>
  166 #include <fpe.h>
  167 #include <mach_kdb.h>
  168 #include <mach_ttd.h>
  169 #include <mach_pcsample.h>
  170 
  171 #include <sys/types.h>
  172 #include <i386/eflags.h>
  173 #include <i386/trap.h>
  174 #include <machine/machspl.h>    /* for spl_t */
  175 
  176 #include <mach/exception.h>
  177 #include <mach/kern_return.h>
  178 #include <mach/vm_param.h>
  179 #include <mach/i386/thread_status.h>
  180 
  181 #include <vm/vm_kern.h>
  182 #include <vm/vm_map.h>
  183 
  184 #include <kern/ast.h>
  185 #include <kern/thread.h>
  186 #include <kern/task.h>
  187 #include <kern/sched.h>
  188 #include <kern/sched_prim.h>
  189 
  190 #include <i386/io_emulate.h>
  191 
  192 extern void exception();
  193 extern void thread_exception_return();
  194 
  195 extern void i386_exception();
  196 
  197 #if     MACH_KDB
  198 boolean_t       debug_all_traps_with_kdb = FALSE;
  199 extern struct db_watchpoint *db_watchpoint_list;
  200 extern boolean_t db_watchpoints_inserted;
  201 
  202 void
  203 thread_kdb_return()
  204 {
  205         register thread_t thread = current_thread();
  206         register struct i386_saved_state *regs = USER_REGS(thread);
  207 
  208         if (kdb_trap(regs->trapno, regs->err, regs)) {
  209                 thread_exception_return();
  210                 /*NOTREACHED*/
  211         }
  212 }
  213 #endif  /* MACH_KDB */
  214 
  215 #if     MACH_TTD
  216 extern boolean_t kttd_enabled;
  217 boolean_t debug_all_traps_with_kttd = TRUE;
  218 #endif  /* MACH_TTD */
  219 
  220 void
  221 user_page_fault_continue(kr)
  222         kern_return_t kr;
  223 {
  224         register thread_t thread = current_thread();
  225         register struct i386_saved_state *regs = USER_REGS(thread);
  226 
  227         if (kr == KERN_SUCCESS) {
  228 #if     MACH_KDB
  229                 if (db_watchpoint_list &&
  230                     db_watchpoints_inserted &&
  231                     (regs->err & T_PF_WRITE) &&
  232                     db_find_watchpoint(thread->task->map,
  233                                        (vm_offset_t)regs->cr2,
  234                                        regs))
  235                         kdb_trap(T_WATCHPOINT, 0, regs);
  236 #endif  /* MACH_KDB */
  237                 thread_exception_return();
  238                 /*NOTREACHED*/
  239         }
  240 
  241 #if     MACH_KDB
  242         if (debug_all_traps_with_kdb &&
  243             kdb_trap(regs->trapno, regs->err, regs)) {
  244                 thread_exception_return();
  245                 /*NOTREACHED*/
  246         }
  247 #endif  /* MACH_KDB */
  248 
  249         i386_exception(EXC_BAD_ACCESS, kr, regs->cr2);
  250         /*NOTREACHED*/
  251 }
  252 
  253 /*
  254  * Fault recovery in copyin/copyout routines.
  255  */
  256 struct recovery {
  257         int     fault_addr;
  258         int     recover_addr;
  259 };
  260 
  261 extern struct recovery  recover_table[];
  262 extern struct recovery  recover_table_end[];
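      /*
       * Each entry pairs the address of a potentially faulting
       * instruction (fault_addr) with the address at which execution
       * should resume (recover_addr).  kernel_trap() below scans these
       * tables and rewrites the saved eip on a matching page fault;
       * the tables themselves are presumably emitted by the
       * assembly-level copyin/copyout code.
       */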
  263 
  264 /*
  265  * Recovery from a successful fault in copyout does not
  266  * return directly - it retries the pte check, since
  267  * the 386 ignores write protection in kernel mode.
  268  */
  269 extern struct recovery  retry_table[];
  270 extern struct recovery  retry_table_end[];
  271 
  272 char *  trap_type[] = {
  273         "Divide error",
  274         "Debug trap",
  275         "NMI",
  276         "Breakpoint",
  277         "Overflow",
  278         "Bounds check",
  279         "Invalid opcode",
  280         "No coprocessor",
  281         "Double fault",
  282         "Coprocessor overrun",
  283         "Invalid TSS",
  284         "Segment not present",
  285         "Stack bounds",
  286         "General protection",
  287         "Page fault",
  288         "(reserved)",
  289         "Coprocessor error"
  290 };
  291 int     TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);
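      /*
       * trap_type[] is indexed by the i386 hardware trap number
       * (vectors 0 through 16); TRAP_TYPES is the number of entries.
       */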
  292 
  293 boolean_t       brb = TRUE;
  294 
  295 /*
  296  * Trap from kernel mode.  Only page-fault errors are recoverable,
  297  * and then only in special circumstances.  All other errors are
  298  * fatal.
  299  */
  300 void kernel_trap(regs)
  301         register struct i386_saved_state *regs;
  302 {
  303         int     exc;
  304         int     code;
  305         int     subcode;
  306         register int    type;
  307         vm_map_t        map;
  308         kern_return_t   result;
  309         register thread_t       thread;
  310 
  311         type = regs->trapno;
  312         code = regs->err;
  313         thread = current_thread();
  314 
  315         switch (type) {
  316             case T_NO_FPU:
  317                 fpnoextflt();
  318                 return;
  319 
  320             case T_FPU_FAULT:
  321                 fpextovrflt();
  322                 return;
  323 
  324             case T_FLOATING_POINT_ERROR:
  325                 fpexterrflt();
  326                 return;
  327 
  328             case T_PAGE_FAULT:
  329                 /*
  330                  * If the current map is a submap of the kernel map,
  331                  * and the address is within that map, fault on that
  332                  * map.  If the same check is done in vm_fault
  333                  * (vm_map_lookup), we may deadlock on the kernel map
  334                  * lock.
  335                  */
  336                 subcode = regs->cr2;    /* get faulting address */
  337 
  338                 if (thread == THREAD_NULL)
  339                     map = kernel_map;
  340                 else {
  341                     map = thread->task->map;
  342                     if ((vm_offset_t)subcode < vm_map_min(map) ||
  343                         (vm_offset_t)subcode >= vm_map_max(map))
  344                         map = kernel_map;
  345                 }
  346 
  347                 /*
  348                  * Since the 386 ignores write protection in
  349                  * kernel mode, always try for write permission
  350                  * first.  If that fails and the fault was a
  351                  * read fault, retry with read permission.
  352                  */
  353                 result = vm_fault(map,
  354                                   trunc_page((vm_offset_t)subcode),
  355                                   VM_PROT_READ|VM_PROT_WRITE,
  356                                   FALSE,
  357                                   FALSE,
  358                                   (void (*)()) 0);
  359 #if     MACH_KDB
  360                 if (result == KERN_SUCCESS) {
  361                     /* Look for watchpoints */
  362                     if (db_watchpoint_list &&
  363                         db_watchpoints_inserted &&
  364                         (code & T_PF_WRITE) &&
  365                         db_find_watchpoint(map,
  366                                 (vm_offset_t)subcode, regs))
  367                         kdb_trap(T_WATCHPOINT, 0, regs);
  368                 }
  369                 else
  370 #endif  /* MACH_KDB */
  371                 if ((code & T_PF_WRITE) == 0 &&
  372                     result == KERN_PROTECTION_FAILURE)
  373                 {
  374                     /*
  375                      *  Must expand vm_fault by hand,
  376                      *  so that we can ask for read-only access
  377                      *  but enter a (kernel)writable mapping.
  378                      */
  379                     result = intel_read_fault(map,
  380                                           trunc_page((vm_offset_t)subcode));
  381                 }
  382 
  383                 if (result == KERN_SUCCESS) {
  384                     /*
  385                      * Certain faults require that we back up
  386                      * the EIP.
  387                      */
  388                     register struct recovery *rp;
  389 
  390                     for (rp = retry_table; rp < retry_table_end; rp++) {
  391                         if (regs->eip == rp->fault_addr) {
  392                             regs->eip = rp->recover_addr;
  393                             break;
  394                         }
  395                     }
  396                     return;
  397                 }
  398 
  399                 /*
  400                  * If there is a failure recovery address
  401                  * for this fault, go there.
  402                  */
  403                 {
  404                     register struct recovery *rp;
  405 
  406                     for (rp = recover_table;
  407                          rp < recover_table_end;
  408                          rp++) {
  409                         if (regs->eip == rp->fault_addr) {
  410                             regs->eip = rp->recover_addr;
  411                             return;
  412                         }
  413                     }
  414                 }
  415 
  416                 /*
  417                  * Check thread recovery address also -
  418                  * v86 assist uses it.
  419                  */
  420                 if (thread->recover) {
  421                     regs->eip = thread->recover;
  422                     thread->recover = 0;
  423                     return;
  424                 }
  425 
  426                 /*
  427                  * Unanticipated page-fault errors in kernel
  428                  * should not happen.
  429                  */
  430                 /* fall through */
  431 
  432             default:
  433 #if     MACH_TTD
  434                 if (kttd_enabled && kttd_trap(type, code, regs))
  435                         return;
  436 #endif  /* MACH_TTD */
  437 #if     MACH_KDB
  438                 if (kdb_trap(type, code, regs))
  439                     return;
  440 #endif  /* MACH_KDB */
  441                 printf("trap type %d, code = %x, pc = %x\n",
  442                         type, code, regs->eip);
  443                 panic("trap");
  444                 return;
  445         }
  446 }
  447 
  448 
  449 /*
  450  *      Trap from user mode.
  451  */
  452 void user_trap(regs)
  453         register struct i386_saved_state *regs;
  454 {
  455         int     exc;
  456         int     code;
  457         int     subcode;
  458         register int    type;
  459         vm_map_t        map;
  460         kern_return_t   result;
  461         register thread_t thread = current_thread();
  462 
  463         if (regs->efl & EFL_VM) {
  464             /*
  465              * If hardware assist can handle exception,
  466              * continue execution.
  467              */
  468             if (v86_assist(thread, regs))
  469                 return;
  470         }
  471 
  472         type = regs->trapno;
  473         code = 0;
  474         subcode = 0;
  475 
  476         switch (type) {
  477 
  478             case T_DIVIDE_ERROR:
  479                 exc = EXC_ARITHMETIC;
  480                 code = EXC_I386_DIV;
  481                 break;
  482 
  483             case T_DEBUG:
  484 #if     MACH_TTD
  485                 if (kttd_enabled && kttd_in_single_step()) {
  486                         if (kttd_trap(type, regs->err, regs))
  487                                 return;
  488                 }
  489 #endif  /* MACH_TTD */
  490 #if     MACH_KDB
  491                 if (db_in_single_step()) {
  492                     if (kdb_trap(type, regs->err, regs))
  493                         return;
  494                 }
  495 #endif
  496                 exc = EXC_BREAKPOINT;
  497                 code = EXC_I386_SGL;
  498                 break;
  499 
  500             case T_INT3:
  501 #if     MACH_TTD
  502                 if (kttd_enabled && kttd_trap(type, regs->err, regs))
  503                         return;
  504                 break;
  505 #endif  /* MACH_TTD */
  506 #if     MACH_KDB
  507             {
  508                 boolean_t db_find_breakpoint_here();
  509 
  510                 if (db_find_breakpoint_here(
  511                         (current_thread())? current_thread()->task: TASK_NULL,
  512                         regs->eip - 1)) {
  513                     if (kdb_trap(type, regs->err, regs))
  514                         return;
  515                 }
  516             }
  517 #endif
  518                 exc = EXC_BREAKPOINT;
  519                 code = EXC_I386_BPT;
  520                 break;
  521 
  522             case T_OVERFLOW:
  523                 exc = EXC_ARITHMETIC;
  524                 code = EXC_I386_INTO;
  525                 break;
  526 
  527             case T_OUT_OF_BOUNDS:
  528                 exc = EXC_SOFTWARE;
  529                 code = EXC_I386_BOUND;
  530                 break;
  531 
  532             case T_INVALID_OPCODE:
  533                 exc = EXC_BAD_INSTRUCTION;
  534                 code = EXC_I386_INVOP;
  535                 break;
  536 
  537             case T_NO_FPU:
  538             case 32:            /* XXX */
  539                 fpnoextflt();
  540                 return;
  541 
  542             case T_FPU_FAULT:
  543                 fpextovrflt();
  544                 return;
  545 
  546             case 10:            /* invalid TSS == iret with NT flag set */
  547                 exc = EXC_BAD_INSTRUCTION;
  548                 code = EXC_I386_INVTSSFLT;
  549                 subcode = regs->err & 0xffff;
  550                 break;
  551 
  552             case T_SEGMENT_NOT_PRESENT:
  553 #if     FPE
  554                 if (fp_emul_error(regs))
  555                     return;
  556 #endif  /* FPE */
  557 
  558                 exc = EXC_BAD_INSTRUCTION;
  559                 code = EXC_I386_SEGNPFLT;
  560                 subcode = regs->err & 0xffff;
  561                 break;
  562 
  563             case T_STACK_FAULT:
  564                 exc = EXC_BAD_INSTRUCTION;
  565                 code = EXC_I386_STKFLT;
  566                 subcode = regs->err & 0xffff;
  567                 break;
  568 
  569             case T_GENERAL_PROTECTION:
  570                 if (!(regs->efl & EFL_VM)) {
  571                     if (check_io_fault(regs))
  572                         return;
  573                 }
  574                 exc = EXC_BAD_INSTRUCTION;
  575                 code = EXC_I386_GPFLT;
  576                 subcode = regs->err & 0xffff;
  577                 break;
  578 
  579             case T_PAGE_FAULT:
  580                 subcode = regs->cr2;
  581                 (void) vm_fault(thread->task->map,
  582                                 trunc_page((vm_offset_t)subcode),
  583                                 (regs->err & T_PF_WRITE)
  584                                   ? VM_PROT_READ|VM_PROT_WRITE
  585                                   : VM_PROT_READ,
  586                                 FALSE,
  587                                 FALSE,
  588                                 user_page_fault_continue);
  589                 /*NOTREACHED*/
  590                 break;
  591 
  592             case T_FLOATING_POINT_ERROR:
  593                 fpexterrflt();
  594                 return;
  595 
  596             default:
  597 #if     MACH_TTD
  598                 if (kttd_enabled && kttd_trap(type, regs->err, regs))
  599                         return;
  600 #endif  /* MACH_TTD */
  601 #if     MACH_KDB
  602                 if (kdb_trap(type, regs->err, regs))
  603                     return;
  604 #endif  /* MACH_KDB */
  605                 printf("trap type %d, code = %x, pc = %x\n",
  606                        type, regs->err, regs->eip);
  607                 panic("trap");
  608                 return;
  609         }
  610 
  611 #if     MACH_TTD
  612         if (debug_all_traps_with_kttd && kttd_trap(type, regs->err, regs))
  613                 return;
  614 #endif  /* MACH_TTD */
  615 #if     MACH_KDB
  616         if (debug_all_traps_with_kdb &&
  617             kdb_trap(type, regs->err, regs))
  618                 return;
  619 #endif  /* MACH_KDB */
  620 
  621         i386_exception(exc, code, subcode);
  622         /*NOTREACHED*/
  623 }
  624 
  625 /*
  626  *      V86 mode assist for interrupt handling.
  627  */
  628 boolean_t v86_assist_on = TRUE;
  629 boolean_t v86_unsafe_ok = FALSE;
  630 boolean_t v86_do_sti_cli = TRUE;
  631 boolean_t v86_do_sti_immediate = FALSE;
  632 
  633 #define V86_IRET_PENDING 0x4000
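      /*
       * V86_IRET_PENDING lives in the software v86->flags word: it
       * records that a simulated interrupt frame has been pushed, so
       * further simulated interrupts are held off until the emulated
       * iret below pops that frame.
       */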
  634 
  635 int cli_count = 0;
  636 int sti_count = 0;
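      /*
       * cli_count and sti_count tally emulated cli/sti instructions;
       * nothing in this file reads them, so they appear to serve only
       * as debugging/statistics counters.
       */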
  637 
  638 boolean_t
  639 v86_assist(thread, regs)
  640         thread_t        thread;
  641         register struct i386_saved_state *regs;
  642 {
  643         register struct v86_assist_state *v86 = &thread->pcb->ims.v86s;
  644 
  645 /*
  646  * Build an 8086 address.  Use only when off is known to be 16 bits.
  647  */
  648 #define Addr8086(seg,off)       ((((seg) & 0xffff) << 4) + (off))
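      /*
       * Real-mode linear address: segment shifted left four bits plus
       * the 16-bit offset.  For example, Addr8086(0x1234, 0x0010)
       * yields 0x12350.
       */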
  649 
  650 #define EFL_V86_SAFE            (  EFL_OF | EFL_DF | EFL_TF \
  651                                  | EFL_SF | EFL_ZF | EFL_AF \
  652                                  | EFL_PF | EFL_CF )
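      /*
       * EFL_V86_SAFE names the arithmetic, direction and trace flag
       * bits that the emulated popf/iret below may load directly into
       * the hardware EFLAGS; IF is never loaded from the guest image
       * and is virtualized in v86->flags instead.
       */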
  653         struct iret_32 {
  654                 int             eip;
  655                 int             cs;
  656                 int             eflags;
  657         };
  658         struct iret_16 {
  659                 unsigned short  ip;
  660                 unsigned short  cs;
  661                 unsigned short  flags;
  662         };
  663         union iret_struct {
  664                 struct iret_32  iret_32;
  665                 struct iret_16  iret_16;
  666         };
  667 
  668         struct int_vec {
  669                 unsigned short  ip;
  670                 unsigned short  cs;
  671         };
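              /*
               * iret_32/iret_16 mirror the 32-bit and 16-bit interrupt
               * return frames popped by the emulated iret below;
               * int_vec is the offset:segment layout of an entry in the
               * V86 task's interrupt vector table at address zero.
               */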
  672 
  673         if (!v86_assist_on)
  674             return FALSE;
  675 
  676         /*
  677          * If delayed STI pending, enable interrupts.
  678          * Turn off tracing if on only to delay STI.
  679          */
  680         if (v86->flags & V86_IF_PENDING) {
  681             v86->flags &= ~V86_IF_PENDING;
  682             v86->flags |=  EFL_IF;
  683             if ((v86->flags & EFL_TF) == 0)
  684                 regs->efl &= ~EFL_TF;
  685         }
  686 
  687         if (regs->trapno == T_DEBUG) {
  688 
  689             if (v86->flags & EFL_TF) {
  690                 /*
  691                  * Trace flag was also set - it has priority
  692                  */
  693                 return FALSE;                   /* handle as single-step */
  694             }
  695             /*
  696              * Fall through to check for interrupts.
  697              */
  698         }
  699         else if (regs->trapno == T_GENERAL_PROTECTION) {
  700             /*
  701              * General protection error - must be an 8086 instruction
  702              * to emulate.
  703              */
  704             register int        eip;
  705             boolean_t   addr_32 = FALSE;
  706             boolean_t   data_32 = FALSE;
  707             int         io_port;
  708 
  709             /*
  710              * Set up error handler for bad instruction/data
  711              * fetches.
  712              */
  713             asm("movl $(addr_error), %0" : "=m" (thread->recover));
  714 
  715             eip = regs->eip;
  716             while (TRUE) {
  717                 unsigned char   opcode;
  718 
  719                 if (eip > 0xFFFF) {
  720                     thread->recover = 0;
  721                     return FALSE;       /* GP fault: IP out of range */
  722                 }
  723 
  724                 opcode = *(unsigned char *)Addr8086(regs->cs,eip);
  725                 eip++;
  726                 switch (opcode) {
  727                     case 0xf0:          /* lock */
  728                     case 0xf2:          /* repne */
  729                     case 0xf3:          /* repe */
  730                     case 0x2e:          /* cs */
  731                     case 0x36:          /* ss */
  732                     case 0x3e:          /* ds */
  733                     case 0x26:          /* es */
  734                     case 0x64:          /* fs */
  735                     case 0x65:          /* gs */
  736                         /* ignore prefix */
  737                         continue;
  738 
  739                     case 0x66:          /* data size */
  740                         data_32 = TRUE;
  741                         continue;
  742 
  743                     case 0x67:          /* address size */
  744                         addr_32 = TRUE;
  745                         continue;
  746 
  747                     case 0xe4:          /* inb imm */
  748                     case 0xe5:          /* inw imm */
  749                     case 0xe6:          /* outb imm */
  750                     case 0xe7:          /* outw imm */
  751                         io_port = *(unsigned char *)Addr8086(regs->cs, eip);
  752                         eip++;
  753                         goto do_in_out;
  754 
  755                     case 0xec:          /* inb dx */
  756                     case 0xed:          /* inw dx */
  757                     case 0xee:          /* outb dx */
  758                     case 0xef:          /* outw dx */
  759                     case 0x6c:          /* insb */
  760                     case 0x6d:          /* insw */
  761                     case 0x6e:          /* outsb */
  762                     case 0x6f:          /* outsw */
  763                         io_port = regs->edx & 0xffff;
  764 
  765                     do_in_out:
  766                         if (!data_32)
  767                             opcode |= 0x6600;   /* word IO */
  768 
  769                         switch (emulate_io(regs, opcode, io_port)) {
  770                             case EM_IO_DONE:
  771                                 /* instruction executed */
  772                                 break;
  773                             case EM_IO_RETRY:
  774                                 /* port mapped, retry instruction */
  775                                 thread->recover = 0;
  776                                 return TRUE;
  777                             case EM_IO_ERROR:
  778                                 /* port not mapped */
  779                                 thread->recover = 0;
  780                                 return FALSE;
  781                         }
  782                         break;
  783 
  784                     case 0xfa:          /* cli */
  785                         if (!v86_do_sti_cli) {
  786                             thread->recover = 0;
  787                             return (FALSE);
  788                         }
  789 
  790                         v86->flags &= ~EFL_IF;
  791                                         /* disable simulated interrupts */
  792                         cli_count++;
  793                         break;
  794 
  795                     case 0xfb:          /* sti */
  796                         if (!v86_do_sti_cli) {
  797                             thread->recover = 0;
  798                             return (FALSE);
  799                         }
  800 
  801                         if ((v86->flags & EFL_IF) == 0) {
  802                             if (v86_do_sti_immediate) {
  803                                     v86->flags |= EFL_IF;
  804                             } else {
  805                                     v86->flags |= V86_IF_PENDING;
  806                                     regs->efl |= EFL_TF;
  807                             }
  808                                         /* single step to set IF next inst. */
  809                         }
  810                         sti_count++;
  811                         break;
  812 
  813                     case 0x9c:          /* pushf */
  814                     {
  815                         int     flags;
  816                         vm_offset_t sp;
  817                         int     size;
  818 
  819                         flags = regs->efl;
  820                         if ((v86->flags & EFL_IF) == 0)
  821                             flags &= ~EFL_IF;
  822 
  823                         if ((v86->flags & EFL_TF) == 0)
  824                             flags &= ~EFL_TF;
  825                         else flags |= EFL_TF;
  826 
  827                         sp = regs->uesp;
  828                         if (!addr_32)
  829                             sp &= 0xffff;
  830                         else if (sp > 0xffff)
  831                             goto stack_error;
  832                         size = (data_32) ? 4 : 2;
  833                         if (sp < size)
  834                             goto stack_error;
  835                         sp -= size;
  836                         if (copyout((char *)&flags,
  837                                     (char *)Addr8086(regs->ss,sp),
  838                                     size))
  839                             goto addr_error;
  840                         if (addr_32)
  841                             regs->uesp = sp;
  842                         else
  843                             regs->uesp = (regs->uesp & 0xffff0000) | sp;
  844                         break;
  845                     }
  846 
  847                     case 0x9d:          /* popf */
  848                     {
  849                         vm_offset_t sp;
  850                         int     nflags;
  851 
  852                         sp = regs->uesp;
  853                         if (!addr_32)
  854                             sp &= 0xffff;
  855                         else if (sp > 0xffff)
  856                             goto stack_error;
  857 
  858                         if (data_32) {
  859                             if (sp > 0xffff - sizeof(int))
  860                                 goto stack_error;
  861                             nflags = *(int *)Addr8086(regs->ss,sp);
  862                             sp += sizeof(int);
  863                         }
  864                         else {
  865                             if (sp > 0xffff - sizeof(short))
  866                                 goto stack_error;
  867                             nflags = *(unsigned short *)
  868                                         Addr8086(regs->ss,sp);
  869                             sp += sizeof(short);
  870                         }
  871                         if (addr_32)
  872                             regs->uesp = sp;
  873                         else
  874                             regs->uesp = (regs->uesp & 0xffff0000) | sp;
  875 
  876                         if (v86->flags & V86_IRET_PENDING) {
  877                                 v86->flags = nflags & (EFL_TF | EFL_IF);
  878                                 v86->flags |= V86_IRET_PENDING;
  879                         } else {
  880                                 v86->flags = nflags & (EFL_TF | EFL_IF);
  881                         }
  882                         regs->efl = (regs->efl & ~EFL_V86_SAFE)
  883                                      | (nflags & EFL_V86_SAFE);
  884                         break;
  885                     }
  886                     case 0xcf:          /* iret */
  887                     {
  888                         vm_offset_t sp;
  889                         int     nflags;
  890                         int     size;
  891                         union iret_struct iret_struct;
  892 
  893                         v86->flags &= ~V86_IRET_PENDING;
  894                         sp = regs->uesp;
  895                         if (!addr_32)
  896                             sp &= 0xffff;
  897                         else if (sp > 0xffff)
  898                             goto stack_error;
  899 
  900                         if (data_32) {
  901                             if (sp > 0xffff - sizeof(struct iret_32))
  902                                 goto stack_error;
  903                             iret_struct.iret_32 =
  904                                 *(struct iret_32 *) Addr8086(regs->ss,sp);
  905                             sp += sizeof(struct iret_32);
  906                         }
  907                         else {
  908                             if (sp > 0xffff - sizeof(struct iret_16))
  909                                 goto stack_error;
  910                             iret_struct.iret_16 =
  911                                 *(struct iret_16 *) Addr8086(regs->ss,sp);
  912                             sp += sizeof(struct iret_16);
  913                         }
  914                         if (addr_32)
  915                             regs->uesp = sp;
  916                         else
  917                             regs->uesp = (regs->uesp & 0xffff0000) | sp;
  918 
  919                         if (data_32) {
  920                             eip       = iret_struct.iret_32.eip;
  921                             regs->cs  = iret_struct.iret_32.cs & 0xffff;
  922                             nflags    = iret_struct.iret_32.eflags;
  923                         }
  924                         else {
  925                             eip       = iret_struct.iret_16.ip;
  926                             regs->cs  = iret_struct.iret_16.cs;
  927                             nflags    = iret_struct.iret_16.flags;
  928                         }
  929 
  930                         v86->flags = nflags & (EFL_TF | EFL_IF);
  931                         regs->efl = (regs->efl & ~EFL_V86_SAFE)
  932                                      | (nflags & EFL_V86_SAFE);
  933                         break;
  934                     }
  935                     default:
  936                         /*
  937                          * Instruction not emulated here.
  938                          */
  939                         thread->recover = 0;
  940                         return FALSE;
  941                 }
  942                 break;  /* exit from 'while TRUE' */
  943             }
  944             regs->eip = (regs->eip & 0xffff0000) | eip;
  945         }
  946         else {
  947             /*
  948              * Not a trap we handle.
  949              */
  950             thread->recover = 0;
  951             return FALSE;
  952         }
  953 
  954         if ((v86->flags & EFL_IF) && ((v86->flags & V86_IRET_PENDING)==0)) {
  955 
  956             struct v86_interrupt_table *int_table;
  957             int int_count;
  958             int vec;
  959             int i;
  960 
  961             int_table = (struct v86_interrupt_table *) v86->int_table;
  962             int_count = v86->int_count;
  963 
  964             vec = 0;
  965             for (i = 0; i < int_count; int_table++, i++) {
  966                 if (!int_table->mask && int_table->count > 0) {
  967                     int_table->count--;
  968                     vec = int_table->vec;
  969                     break;
  970                 }
  971             }
  972             if (vec != 0) {
  973                 /*
  974                  * Take this interrupt
  975                  */
  976                 vm_offset_t     sp;
  977                 struct iret_16 iret_16;
  978                 struct int_vec int_vec;
  979 
  980                 sp = regs->uesp & 0xffff;
  981                 if (sp < sizeof(struct iret_16))
  982                     goto stack_error;
  983                 sp -= sizeof(struct iret_16);
  984                 iret_16.ip = regs->eip;
  985                 iret_16.cs = regs->cs;
  986                 iret_16.flags = regs->efl & 0xFFFF;
  987                 if ((v86->flags & EFL_TF) == 0)
  988                     iret_16.flags &= ~EFL_TF;
  989                 else iret_16.flags |= EFL_TF;
  990 
  991 #ifdef  gcc_1_36_worked
  992                 int_vec = ((struct int_vec *)0)[vec];
  993 #else
  994                 bcopy((char *) (sizeof(struct int_vec) * vec),
  995                       (char *)&int_vec,
  996                       sizeof (struct int_vec));
  997 #endif
  998                 if (copyout((char *)&iret_16,
  999                             (char *)Addr8086(regs->ss,sp),
 1000                             sizeof(struct iret_16)))
 1001                     goto addr_error;
 1002                 regs->uesp = (regs->uesp & 0xFFFF0000) | (sp & 0xffff);
 1003                 regs->eip = int_vec.ip;
 1004                 regs->cs  = int_vec.cs;
 1005                 regs->efl  &= ~EFL_TF;
 1006                 v86->flags &= ~(EFL_IF | EFL_TF);
 1007                 v86->flags |= V86_IRET_PENDING;
 1008             }
 1009         }
 1010 
 1011         thread->recover = 0;
 1012         return TRUE;
 1013 
 1014         /*
 1015          *      On address error, report a page fault.
 1016          *      XXX report GP fault - we don't save
 1017          *      the faulting address.
 1018          */
 1019     addr_error:
 1020         asm("addr_error:;");
 1021         thread->recover = 0;
 1022         return FALSE;
 1023 
 1024         /*
 1025          *      On stack address error, return stack fault (12).
 1026          */
 1027     stack_error:
 1028         thread->recover = 0;
 1029         regs->trapno = T_STACK_FAULT;
 1030         return FALSE;
 1031 }
 1032 
 1033 /*
 1034  * Handle AST traps for i386.
 1035  * Check for delayed floating-point exception from
 1036  * AT-bus machines.
 1037  */
 1038 void
 1039 i386_astintr()
 1040 {
 1041         int     mycpu = cpu_number();
 1042 
 1043         (void) splsched();      /* block interrupts to check reasons */
 1044         if (need_ast[mycpu] & AST_I386_FP) {
 1045             /*
 1046              * AST was for delayed floating-point exception -
 1047              * FP interrupt occurred while in kernel.
 1048              * Turn off this AST reason and handle the FPU error.
 1049              */
 1050             ast_off(mycpu, AST_I386_FP);
 1051             (void) spl0();
 1052 
 1053             fpexterrflt();
 1054         }
 1055         else {
 1056             /*
 1057              * Not an FPU trap.  Handle the AST.
 1058              * Interrupts are still blocked.
 1059              */
 1060             ast_taken();
 1061         }
 1062 }
 1063 
 1064 /*
 1065  * Handle exceptions for i386.
 1066  *
 1067  * If we are an AT bus machine, we must turn off the AST for a
 1068  * delayed floating-point exception.
 1069  *
 1070  * If we are providing floating-point emulation, we may have
 1071  * to retrieve the real register values from the floating point
 1072  * emulator.
 1073  */
 1074 void
 1075 i386_exception(exc, code, subcode)
 1076         int     exc;
 1077         int     code;
 1078         int     subcode;
 1079 {
 1080         spl_t   s;
 1081 
 1082         /*
 1083          * Turn off delayed FPU error handling.
 1084          */
 1085         s = splsched();
 1086         ast_off(cpu_number(), AST_I386_FP);
 1087         splx(s);
 1088 
 1089 #if     FPE
 1090         fpe_exception_fixup(exc, code, subcode);
 1091 #else
 1092         exception(exc, code, subcode);
 1093 #endif
 1094         /*NOTREACHED*/
 1095 }
 1096 
 1097 boolean_t
 1098 check_io_fault(regs)
 1099         struct i386_saved_state *regs;
 1100 {
 1101         int             eip, opcode, io_port;
 1102         boolean_t       data_16 = FALSE;
 1103 
 1104         /*
 1105          * Get the instruction.
 1106          */
 1107         eip = regs->eip;
 1108 
 1109         for (;;) {
 1110             opcode = inst_fetch(eip, regs->cs);
 1111             eip++;
 1112             switch (opcode) {
 1113                 case 0x66:      /* data-size prefix */
 1114                     data_16 = TRUE;
 1115                     continue;
 1116 
 1117                 case 0xf3:      /* rep prefix */
 1118                 case 0x26:      /* es */
 1119                 case 0x2e:      /* cs */
 1120                 case 0x36:      /* ss */
 1121                 case 0x3e:      /* ds */
 1122                 case 0x64:      /* fs */
 1123                 case 0x65:      /* gs */
 1124                     continue;
 1125 
 1126                 case 0xE4:      /* inb imm */
 1127                 case 0xE5:      /* inl imm */
 1128                 case 0xE6:      /* outb imm */
 1129                 case 0xE7:      /* outl imm */
 1130                     /* port is immediate byte */
 1131                     io_port = inst_fetch(eip, regs->cs);
 1132                     eip++;
 1133                     break;
 1134 
 1135                 case 0xEC:      /* inb dx */
 1136                 case 0xED:      /* inl dx */
 1137                 case 0xEE:      /* outb dx */
 1138                 case 0xEF:      /* outl dx */
 1139                 case 0x6C:      /* insb */
 1140                 case 0x6D:      /* insl */
 1141                 case 0x6E:      /* outsb */
 1142                 case 0x6F:      /* outsl */
 1143                     /* port is in DX register */
 1144                     io_port = regs->edx & 0xFFFF;
 1145                     break;
 1146 
 1147                 default:
 1148                     return FALSE;
 1149             }
 1150             break;
 1151         }
 1152 
 1153         if (data_16)
 1154             opcode |= 0x6600;           /* word IO */
 1155 
 1156         switch (emulate_io(regs, opcode, io_port)) {
 1157             case EM_IO_DONE:
 1158                 /* instruction executed */
 1159                 regs->eip = eip;
 1160                 return TRUE;
 1161 
 1162             case EM_IO_RETRY:
 1163                 /* port mapped, retry instruction */
 1164                 return TRUE;
 1165 
 1166             case EM_IO_ERROR:
 1167                 /* port not mapped */
 1168                 return FALSE;
 1169         }
 1170 }
 1171 
 1172 #if     MACH_PCSAMPLE > 0
 1173 /*
 1174  * return the user pc at which the given thread was interrupted
 1175  */
 1176 unsigned
 1177 interrupted_pc(t)
 1178         thread_t t;
 1179 {
 1180         register struct i386_saved_state *iss;
 1181 
 1182         iss = USER_REGS(t);
 1183         return iss->eip;
 1184 }
 1185 #endif  /* MACH_PCSAMPLE > 0 */
 1186 
