FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/trap.c


    1 /*-
    2  * Copyright (C) 1994, David Greenman
    3  * Copyright (c) 1990, 1993
    4  *      The Regents of the University of California.  All rights reserved.
    5  *
    6  * This code is derived from software contributed to Berkeley by
    7  * the University of Utah, and William Jolitz.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *      This product includes software developed by the University of
   20  *      California, Berkeley and its contributors.
   21  * 4. Neither the name of the University nor the names of its contributors
   22  *    may be used to endorse or promote products derived from this software
   23  *    without specific prior written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   35  * SUCH DAMAGE.
   36  *
   37  *      from: @(#)trap.c        7.4 (Berkeley) 5/13/91
   38  */
   39 
   40 #include <sys/cdefs.h>
   41 __FBSDID("$FreeBSD: releng/6.1/sys/i386/i386/trap.c 158179 2006-04-30 16:44:43Z cvs2svn $");
   42 
   43 /*
   44  * 386 Trap and System call handling
   45  */
   46 
   47 #include "opt_clock.h"
   48 #include "opt_cpu.h"
   49 #include "opt_hwpmc_hooks.h"
   50 #include "opt_isa.h"
   51 #include "opt_kdb.h"
   52 #include "opt_ktrace.h"
   53 #include "opt_npx.h"
   54 #include "opt_trap.h"
   55 
   56 #include <sys/param.h>
   57 #include <sys/bus.h>
   58 #include <sys/systm.h>
   59 #include <sys/proc.h>
   60 #include <sys/pioctl.h>
   61 #include <sys/ptrace.h>
   62 #include <sys/kdb.h>
   63 #include <sys/kernel.h>
   64 #include <sys/ktr.h>
   65 #include <sys/lock.h>
   66 #include <sys/mutex.h>
   67 #include <sys/resourcevar.h>
   68 #include <sys/signalvar.h>
   69 #include <sys/syscall.h>
   70 #include <sys/sysctl.h>
   71 #include <sys/sysent.h>
   72 #include <sys/uio.h>
   73 #include <sys/vmmeter.h>
   74 #ifdef KTRACE
   75 #include <sys/ktrace.h>
   76 #endif
   77 #ifdef HWPMC_HOOKS
   78 #include <sys/pmckern.h>
   79 #endif
   80 
   81 #include <vm/vm.h>
   82 #include <vm/vm_param.h>
   83 #include <vm/pmap.h>
   84 #include <vm/vm_kern.h>
   85 #include <vm/vm_map.h>
   86 #include <vm/vm_page.h>
   87 #include <vm/vm_extern.h>
   88 
   89 #include <machine/cpu.h>
   90 #include <machine/intr_machdep.h>
   91 #include <machine/md_var.h>
   92 #include <machine/pcb.h>
   93 #ifdef SMP
   94 #include <machine/smp.h>
   95 #endif
   96 #include <machine/tss.h>
   97 #include <machine/vm86.h>
   98 
   99 #ifdef POWERFAIL_NMI
  100 #include <sys/syslog.h>
  101 #include <machine/clock.h>
  102 #endif
  103 
  104 extern void trap(struct trapframe frame);
  105 extern void syscall(struct trapframe frame);
  106 
  107 static int trap_pfault(struct trapframe *, int, vm_offset_t);
  108 static void trap_fatal(struct trapframe *, vm_offset_t);
  109 void dblfault_handler(void);
  110 
  111 extern inthand_t IDTVEC(lcall_syscall);
  112 
  113 #define MAX_TRAP_MSG            30
  114 static char *trap_msg[] = {
  115         "",                                     /*  0 unused */
  116         "privileged instruction fault",         /*  1 T_PRIVINFLT */
  117         "",                                     /*  2 unused */
  118         "breakpoint instruction fault",         /*  3 T_BPTFLT */
  119         "",                                     /*  4 unused */
  120         "",                                     /*  5 unused */
  121         "arithmetic trap",                      /*  6 T_ARITHTRAP */
  122         "",                                     /*  7 unused */
  123         "",                                     /*  8 unused */
  124         "general protection fault",             /*  9 T_PROTFLT */
  125         "trace trap",                           /* 10 T_TRCTRAP */
  126         "",                                     /* 11 unused */
  127         "page fault",                           /* 12 T_PAGEFLT */
  128         "",                                     /* 13 unused */
  129         "alignment fault",                      /* 14 T_ALIGNFLT */
  130         "",                                     /* 15 unused */
  131         "",                                     /* 16 unused */
  132         "",                                     /* 17 unused */
  133         "integer divide fault",                 /* 18 T_DIVIDE */
  134         "non-maskable interrupt trap",          /* 19 T_NMI */
  135         "overflow trap",                        /* 20 T_OFLOW */
  136         "FPU bounds check fault",               /* 21 T_BOUND */
  137         "FPU device not available",             /* 22 T_DNA */
  138         "double fault",                         /* 23 T_DOUBLEFLT */
  139         "FPU operand fetch fault",              /* 24 T_FPOPFLT */
  140         "invalid TSS fault",                    /* 25 T_TSSFLT */
  141         "segment not present fault",            /* 26 T_SEGNPFLT */
  142         "stack fault",                          /* 27 T_STKFLT */
  143         "machine check trap",                   /* 28 T_MCHK */
  144         "SIMD floating-point exception",        /* 29 T_XMMFLT */
  145         "reserved (unknown) fault",             /* 30 T_RESERVED */
  146 };
  147 
  148 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
  149 extern int has_f00f_bug;
  150 #endif
  151 
  152 #ifdef KDB
  153 static int kdb_on_nmi = 1;
  154 SYSCTL_INT(_machdep, OID_AUTO, kdb_on_nmi, CTLFLAG_RW,
  155         &kdb_on_nmi, 0, "Go to KDB on NMI");
  156 #endif
  157 static int panic_on_nmi = 1;
  158 SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
  159         &panic_on_nmi, 0, "Panic on NMI");
  160 
  161 #ifdef WITNESS
  162 extern char *syscallnames[];
  163 #endif
  164 
  165 /*
  166  * Exception, fault, and trap interface to the FreeBSD kernel.
  167  * This common code is called from assembly language IDT gate entry
  168  * routines that prepare a suitable stack frame, and restore this
  169  * frame after the exception has been processed.
  170  */
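      /*
       * Editor's note (illustrative, not part of the original file): a rough
       * sketch of what the assembly IDT entry stubs mentioned above do before
       * this C handler runs.  The real stubs live in exception.s; the names
       * and exact ordering below are only an approximation:
       *
       *      IDTVEC(xxx):
       *              pushl   $0              # dummy error code, when the CPU
       *                                      # did not push one itself
       *              pushl   $T_xxx          # trap number, becomes tf_trapno
       *              <save segment and general registers, completing a
       *               struct trapframe on the kernel stack>
       *              call    trap            # this handler
       *              jmp     doreti          # unwind the frame and iret
       */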
  171 
  172 void
  173 trap(frame)
  174         struct trapframe frame;
  175 {
  176         struct thread *td = curthread;
  177         struct proc *p = td->td_proc;
  178         u_int sticks = 0;
  179         int i = 0, ucode = 0, type, code;
  180         vm_offset_t eva;
  181 #ifdef POWERFAIL_NMI
  182         static int lastalert = 0;
  183 #endif
  184 
  185         PCPU_LAZY_INC(cnt.v_trap);
  186         type = frame.tf_trapno;
  187 
  188 #ifdef KDB_STOP_NMI
  189         /* Handler for NMI IPIs used for debugging */
  190         if (type == T_NMI) {
  191                  if (ipi_nmi_handler() == 0)
  192                            goto out;
  193         }
  194 #endif /* KDB_STOP_NMI */
  195 
  196 #ifdef KDB
  197         if (kdb_active) {
  198                 kdb_reenter();
  199                 goto out;
  200         }
  201 #endif
  202 
  203 #ifdef  HWPMC_HOOKS
  204         /*
  205          * CPU PMCs interrupt using an NMI so we check for that first.
  206          * If the HWPMC module is active, 'pmc_hook' will point to
  207          * the function to be called.  A return value of '1' from the
  208          * hook means that the NMI was handled by it and that we can
  209          * return immediately.
  210          */
  211         if (type == T_NMI && pmc_intr &&
  212             (*pmc_intr)(PCPU_GET(cpuid), (uintptr_t) frame.tf_eip,
  213                 TRAPF_USERMODE(&frame)))
  214             goto out;
  215 #endif
  216 
  217         if ((frame.tf_eflags & PSL_I) == 0) {
  218                 /*
  219                  * Buggy application or kernel code has disabled
  220                  * interrupts and then trapped.  Enabling interrupts
  221                  * now is wrong, but it is better than running with
  222                  * interrupts disabled until they are accidentally
  223                  * enabled later.
  224                  */
  225                 if (ISPL(frame.tf_cs) == SEL_UPL || (frame.tf_eflags & PSL_VM))
  226                         printf(
  227                             "pid %ld (%s): trap %d with interrupts disabled\n",
  228                             (long)curproc->p_pid, curproc->p_comm, type);
  229                 else if (type != T_BPTFLT && type != T_TRCTRAP &&
  230                          frame.tf_eip != (int)cpu_switch_load_gs) {
  231                         /*
  232                          * XXX not quite right, since this may be for a
  233                          * multiple fault in user mode.
  234                          */
  235                         printf("kernel trap %d with interrupts disabled\n",
  236                             type);
  237                         /*
  238                          * Page faults need interrupts disabled until later,
  239                          * and we shouldn't enable interrupts while in a
  240                          * critical section or if servicing an NMI.
  241                          */
  242                         if (type != T_NMI && type != T_PAGEFLT &&
  243                             td->td_critnest == 0)
  244                                 enable_intr();
  245                 }
  246         }
  247 
  248         eva = 0;
  249         code = frame.tf_err;
  250         if (type == T_PAGEFLT) {
  251                 /*
  252                  * For some Cyrix CPUs, %cr2 is clobbered by
  253                  * interrupts.  This problem is worked around by using
  254                  * an interrupt gate for the pagefault handler.  We
  255                  * are finally ready to read %cr2 and then must
  256                  * reenable interrupts.
  257                  *
  258                  * If we get a page fault while in a critical section, then
  259                  * it is most likely a fatal kernel page fault.  The kernel
  260                  * is already going to panic trying to get a sleep lock to
  261                  * do the VM lookup, so just consider it a fatal trap so the
  262                  * kernel can print out a useful trap message and even get
  263                  * to the debugger.
  264                  */
  265                 eva = rcr2();
  266                 if (td->td_critnest == 0)
  267                         enable_intr();
  268                 else
  269                         trap_fatal(&frame, eva);
  270         }
  271 
  272         if ((ISPL(frame.tf_cs) == SEL_UPL) ||
  273             ((frame.tf_eflags & PSL_VM) && 
  274                 !(PCPU_GET(curpcb)->pcb_flags & PCB_VM86CALL))) {
  275                 /* user trap */
  276 
  277                 sticks = td->td_sticks;
  278                 td->td_frame = &frame;
  279                 if (td->td_ucred != p->p_ucred) 
  280                         cred_update_thread(td);
  281 
  282                 switch (type) {
  283                 case T_PRIVINFLT:       /* privileged instruction fault */
  284                         ucode = type;
  285                         i = SIGILL;
  286                         break;
  287 
  288                 case T_BPTFLT:          /* bpt instruction fault */
  289                 case T_TRCTRAP:         /* trace trap */
  290                         enable_intr();
  291                         frame.tf_eflags &= ~PSL_T;
  292                         i = SIGTRAP;
  293                         break;
  294 
  295                 case T_ARITHTRAP:       /* arithmetic trap */
  296 #ifdef DEV_NPX
  297                         ucode = npxtrap();
  298                         if (ucode == -1)
  299                                 goto userout;
  300 #else
  301                         ucode = code;
  302 #endif
  303                         i = SIGFPE;
  304                         break;
  305 
  306                         /*
  307                          * The following two traps can happen in
  308                          * vm86 mode, and, if so, we want to handle
  309                          * them specially.
  310                          */
  311                 case T_PROTFLT:         /* general protection fault */
  312                 case T_STKFLT:          /* stack fault */
  313                         if (frame.tf_eflags & PSL_VM) {
  314                                 i = vm86_emulate((struct vm86frame *)&frame);
  315                                 if (i == 0)
  316                                         goto user;
  317                                 break;
  318                         }
  319                         /* FALLTHROUGH */
  320 
  321                 case T_SEGNPFLT:        /* segment not present fault */
  322                 case T_TSSFLT:          /* invalid TSS fault */
  323                 case T_DOUBLEFLT:       /* double fault */
  324                 default:
  325                         ucode = code + BUS_SEGM_FAULT ;
  326                         i = SIGBUS;
  327                         break;
  328 
  329                 case T_PAGEFLT:         /* page fault */
  330                         if (td->td_pflags & TDP_SA)
  331                                 thread_user_enter(td);
  332 
  333                         i = trap_pfault(&frame, TRUE, eva);
  334 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
  335                         if (i == -2) {
  336                                 /*
  337                                  * The f00f hack workaround has triggered, so
  338                                  * treat the fault as an illegal instruction 
  339                                  * (T_PRIVINFLT) instead of a page fault.
  340                                  */
  341                                 type = frame.tf_trapno = T_PRIVINFLT;
  342 
  343                                 /* Proceed as in that case. */
  344                                 ucode = type;
  345                                 i = SIGILL;
  346                                 break;
  347                         }
  348 #endif
  349                         if (i == -1)
  350                                 goto userout;
  351                         if (i == 0)
  352                                 goto user;
  353 
  354                         ucode = T_PAGEFLT;
  355                         break;
  356 
  357                 case T_DIVIDE:          /* integer divide fault */
  358                         ucode = FPE_INTDIV;
  359                         i = SIGFPE;
  360                         break;
  361 
  362 #ifdef DEV_ISA
  363                 case T_NMI:
  364 #ifdef POWERFAIL_NMI
  365 #ifndef TIMER_FREQ
  366 #  define TIMER_FREQ 1193182
  367 #endif
  368                         mtx_lock(&Giant);
  369                         if (time_second - lastalert > 10) {
  370                                 log(LOG_WARNING, "NMI: power fail\n");
  371                                 sysbeep(TIMER_FREQ/880, hz);
  372                                 lastalert = time_second;
  373                         }
  374                         mtx_unlock(&Giant);
  375                         goto userout;
  376 #else /* !POWERFAIL_NMI */
  377                         /* machine/parity/power fail/"kitchen sink" faults */
  378                         /* XXX Giant */
  379                         if (isa_nmi(code) == 0) {
  380 #ifdef KDB
  381                                 /*
  382                                  * NMI can be hooked up to a pushbutton
  383                                  * for debugging.
  384                                  */
  385                                 if (kdb_on_nmi) {
  386                                         printf ("NMI ... going to debugger\n");
  387                                         kdb_trap(type, 0, &frame);
  388                                 }
  389 #endif /* KDB */
  390                                 goto userout;
  391                         } else if (panic_on_nmi)
  392                                 panic("NMI indicates hardware failure");
  393                         break;
  394 #endif /* POWERFAIL_NMI */
  395 #endif /* DEV_ISA */
  396 
  397                 case T_OFLOW:           /* integer overflow fault */
  398                         ucode = FPE_INTOVF;
  399                         i = SIGFPE;
  400                         break;
  401 
  402                 case T_BOUND:           /* bounds check fault */
  403                         ucode = FPE_FLTSUB;
  404                         i = SIGFPE;
  405                         break;
  406 
  407                 case T_DNA:
  408 #ifdef DEV_NPX
  409                         /* transparent fault (due to context switch "late") */
  410                         if (npxdna())
  411                                 goto userout;
  412 #endif
  413                         i = SIGFPE;
  414                         ucode = FPE_FPU_NP_TRAP;
  415                         break;
  416 
  417                 case T_FPOPFLT:         /* FPU operand fetch fault */
  418                         ucode = T_FPOPFLT;
  419                         i = SIGILL;
  420                         break;
  421 
  422                 case T_XMMFLT:          /* SIMD floating-point exception */
  423                         ucode = 0; /* XXX */
  424                         i = SIGFPE;
  425                         break;
  426                 }
  427         } else {
  428                 /* kernel trap */
  429 
  430                 KASSERT(cold || td->td_ucred != NULL,
  431                     ("kernel trap doesn't have ucred"));
  432                 switch (type) {
  433                 case T_PAGEFLT:                 /* page fault */
  434                         (void) trap_pfault(&frame, FALSE, eva);
  435                         goto out;
  436 
  437                 case T_DNA:
  438 #ifdef DEV_NPX
  439                         /*
  440                          * The kernel is apparently using npx for copying.
  441                          * XXX this should be fatal unless the kernel has
  442                          * registered such use.
  443                          */
  444                         if (npxdna())
  445                                 goto out;
  446 #endif
  447                         break;
  448 
  449                         /*
  450                          * The following two traps can happen in
  451                          * vm86 mode, and, if so, we want to handle
  452                          * them specially.
  453                          */
  454                 case T_PROTFLT:         /* general protection fault */
  455                 case T_STKFLT:          /* stack fault */
  456                         if (frame.tf_eflags & PSL_VM) {
  457                                 i = vm86_emulate((struct vm86frame *)&frame);
  458                                 if (i != 0)
  459                                         /*
  460                                          * returns to original process
  461                                          */
  462                                         vm86_trap((struct vm86frame *)&frame);
  463                                 goto out;
  464                         }
  465                         if (type == T_STKFLT)
  466                                 break;
  467 
  468                         /* FALL THROUGH */
  469 
  470                 case T_SEGNPFLT:        /* segment not present fault */
  471                         if (PCPU_GET(curpcb)->pcb_flags & PCB_VM86CALL)
  472                                 break;
  473 
  474                         /*
  475                          * Invalid %fs's and %gs's can be created using
  476                          * procfs or PT_SETREGS or by invalidating the
  477                          * underlying LDT entry.  This causes a fault
  478                          * in kernel mode when the kernel attempts to
  479                          * switch contexts.  Lose the bad context
  480                          * (XXX) so that we can continue, and generate
  481                          * a signal.
  482                          */
  483                         if (frame.tf_eip == (int)cpu_switch_load_gs) {
  484                                 PCPU_GET(curpcb)->pcb_gs = 0;
  485 #if 0                           
  486                                 PROC_LOCK(p);
  487                                 psignal(p, SIGBUS);
  488                                 PROC_UNLOCK(p);
  489 #endif                          
  490                                 goto out;
  491                         }
  492 
  493                         if (td->td_intr_nesting_level != 0)
  494                                 break;
  495 
  496                         /*
  497                          * Invalid segment selectors and out of bounds
  498                          * %eip's and %esp's can be set up in user mode.
  499                          * This causes a fault in kernel mode when the
  500                          * kernel tries to return to user mode.  We want
  501                          * to get this fault so that we can fix the
  502                          * problem here and not have to check all the
  503                          * selectors and pointers when the user changes
  504                          * them.
  505                          */
  506                         if (frame.tf_eip == (int)doreti_iret) {
  507                                 frame.tf_eip = (int)doreti_iret_fault;
  508                                 goto out;
  509                         }
  510                         if (frame.tf_eip == (int)doreti_popl_ds) {
  511                                 frame.tf_eip = (int)doreti_popl_ds_fault;
  512                                 goto out;
  513                         }
  514                         if (frame.tf_eip == (int)doreti_popl_es) {
  515                                 frame.tf_eip = (int)doreti_popl_es_fault;
  516                                 goto out;
  517                         }
  518                         if (frame.tf_eip == (int)doreti_popl_fs) {
  519                                 frame.tf_eip = (int)doreti_popl_fs_fault;
  520                                 goto out;
  521                         }
  522                         if (PCPU_GET(curpcb)->pcb_onfault != NULL) {
  523                                 frame.tf_eip =
  524                                     (int)PCPU_GET(curpcb)->pcb_onfault;
  525                                 goto out;
  526                         }
  527                         break;
  528 
  529                 case T_TSSFLT:
  530                         /*
  531                          * PSL_NT can be set in user mode and isn't cleared
  532                          * automatically when the kernel is entered.  This
  533                          * causes a TSS fault when the kernel attempts to
  534                          * `iret' because the TSS link is uninitialized.  We
  535                          * want to get this fault so that we can fix the
  536                          * problem here and not every time the kernel is
  537                          * entered.
  538                          */
  539                         if (frame.tf_eflags & PSL_NT) {
  540                                 frame.tf_eflags &= ~PSL_NT;
  541                                 goto out;
  542                         }
  543                         break;
  544 
  545                 case T_TRCTRAP:  /* trace trap */
  546                         if (frame.tf_eip == (int)IDTVEC(lcall_syscall)) {
  547                                 /*
  548                                  * We've just entered system mode via the
  549                                  * syscall lcall.  Continue single stepping
  550                                  * silently until the syscall handler has
  551                                  * saved the flags.
  552                                  */
  553                                 goto out;
  554                         }
  555                         if (frame.tf_eip == (int)IDTVEC(lcall_syscall) + 1) {
  556                                 /*
  557                                  * The syscall handler has now saved the
  558                                  * flags.  Stop single stepping it.
  559                                  */
  560                                 frame.tf_eflags &= ~PSL_T;
  561                                 goto out;
  562                         }
  563                         /*
  564                          * Ignore debug register trace traps due to
  565                          * accesses in the user's address space, which
  566                          * can happen under several conditions such as
  567                          * if a user sets a watchpoint on a buffer and
  568                          * then passes that buffer to a system call.
  569                          * We still want to get TRCTRAPS for addresses
  570                          * in kernel space because that is useful when
  571                          * debugging the kernel.
  572                          */
  573                         /* XXX Giant */
  574                         if (user_dbreg_trap() && 
  575                            !(PCPU_GET(curpcb)->pcb_flags & PCB_VM86CALL)) {
  576                                 /*
  577                                  * Reset breakpoint bits because the
  578                                  * processor doesn't clear them itself.
  579                                  */
  580                                 load_dr6(rdr6() & 0xfffffff0);
  581                                 goto out;
  582                         }
  583                         /*
  584                          * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
  585                          */
  586                 case T_BPTFLT:
  587                         /*
  588                          * If KDB is enabled, let it handle the debugger trap.
  589                          * Otherwise, debugger traps "can't happen".
  590                          */
  591 #ifdef KDB
  592                         /* XXX Giant */
  593                         if (kdb_trap(type, 0, &frame))
  594                                 goto out;
  595 #endif
  596                         break;
  597 
  598 #ifdef DEV_ISA
  599                 case T_NMI:
  600 #ifdef POWERFAIL_NMI
  601                         mtx_lock(&Giant);
  602                         if (time_second - lastalert > 10) {
  603                                 log(LOG_WARNING, "NMI: power fail\n");
  604                                 sysbeep(TIMER_FREQ/880, hz);
  605                                 lastalert = time_second;
  606                         }
  607                         mtx_unlock(&Giant);
  608                         goto out;
  609 #else /* !POWERFAIL_NMI */
  610                         /* XXX Giant */
  611                         /* machine/parity/power fail/"kitchen sink" faults */
  612                         if (isa_nmi(code) == 0) {
  613 #ifdef KDB
  614                                 /*
  615                                  * NMI can be hooked up to a pushbutton
  616                                  * for debugging.
  617                                  */
  618                                 if (kdb_on_nmi) {
  619                                         printf ("NMI ... going to debugger\n");
  620                                         kdb_trap(type, 0, &frame);
  621                                 }
  622 #endif /* KDB */
  623                                 goto out;
  624                         } else if (panic_on_nmi == 0)
  625                                 goto out;
  626                         /* FALLTHROUGH */
  627 #endif /* POWERFAIL_NMI */
  628 #endif /* DEV_ISA */
  629                 }
  630 
  631                 trap_fatal(&frame, eva);
  632                 goto out;
  633         }
  634 
  635         /* Translate fault for emulators (e.g. Linux) */
  636         if (*p->p_sysent->sv_transtrap)
  637                 i = (*p->p_sysent->sv_transtrap)(i, type);
  638 
  639         trapsignal(td, i, ucode);
  640 
  641 #ifdef DEBUG
  642         if (type <= MAX_TRAP_MSG) {
  643                 uprintf("fatal process exception: %s",
  644                         trap_msg[type]);
  645                 if ((type == T_PAGEFLT) || (type == T_PROTFLT))
  646                         uprintf(", fault VA = 0x%lx", (u_long)eva);
  647                 uprintf("\n");
  648         }
  649 #endif
  650 
  651 user:
  652         userret(td, &frame, sticks);
  653         mtx_assert(&Giant, MA_NOTOWNED);
  654 userout:
  655 out:
  656         return;
  657 }
  658 
  659 static int
  660 trap_pfault(frame, usermode, eva)
  661         struct trapframe *frame;
  662         int usermode;
  663         vm_offset_t eva;
  664 {
  665         vm_offset_t va;
  666         struct vmspace *vm = NULL;
  667         vm_map_t map = 0;
  668         int rv = 0;
  669         vm_prot_t ftype;
  670         struct thread *td = curthread;
  671         struct proc *p = td->td_proc;
  672 
  673         va = trunc_page(eva);
  674         if (va >= KERNBASE) {
  675                 /*
  676                  * Don't allow user-mode faults in kernel address space.
  677                  * An exception:  if the faulting address is the invalid
  678                  * instruction entry in the IDT, then the Intel Pentium
  679                  * F00F bug workaround was triggered, and we need to
  680                  * treat it as an illegal instruction, and not a page
  681                  * fault.
  682                  */
  683 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
  684                 if ((eva == (unsigned int)&idt[6]) && has_f00f_bug)
  685                         return -2;
  686 #endif
  687                 if (usermode)
  688                         goto nogo;
  689 
  690                 map = kernel_map;
  691         } else {
  692                 /*
  693                  * This is a fault on non-kernel virtual memory.
  694                  * vm is initialized above to NULL. If curproc is NULL
  695                  * or curproc->p_vmspace is NULL the fault is fatal.
  696                  */
  697                 if (p != NULL)
  698                         vm = p->p_vmspace;
  699 
  700                 if (vm == NULL)
  701                         goto nogo;
  702 
  703                 map = &vm->vm_map;
  704         }
  705 
  706         if (frame->tf_err & PGEX_W)
  707                 ftype = VM_PROT_WRITE;
  708         else
  709                 ftype = VM_PROT_READ;
  710 
  711         if (map != kernel_map) {
  712                 /*
  713                  * Keep swapout from messing with us during this
  714                  *      critical time.
  715                  */
  716                 PROC_LOCK(p);
  717                 ++p->p_lock;
  718                 PROC_UNLOCK(p);
  719 
  720                 /* Fault in the user page: */
  721                 rv = vm_fault(map, va, ftype,
  722                               (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
  723                                                       : VM_FAULT_NORMAL);
  724 
  725                 PROC_LOCK(p);
  726                 --p->p_lock;
  727                 PROC_UNLOCK(p);
  728         } else {
  729                 /*
  730                  * Don't have to worry about process locking or stacks in the
  731                  * kernel.
  732                  */
  733                 rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
  734         }
  735         if (rv == KERN_SUCCESS)
  736                 return (0);
  737 nogo:
  738         if (!usermode) {
  739                 if (td->td_intr_nesting_level == 0 &&
  740                     PCPU_GET(curpcb)->pcb_onfault != NULL) {
  741                         frame->tf_eip = (int)PCPU_GET(curpcb)->pcb_onfault;
  742                         return (0);
  743                 }
  744                 trap_fatal(frame, eva);
  745                 return (-1);
  746         }
  747 
  748         /* kludge to pass faulting virtual address to sendsig */
  749         frame->tf_err = eva;
  750 
  751         return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
  752 }
  753 
  754 static void
  755 trap_fatal(frame, eva)
  756         struct trapframe *frame;
  757         vm_offset_t eva;
  758 {
  759         int code, type, ss, esp;
  760         struct soft_segment_descriptor softseg;
  761         char *msg;
  762 
  763         code = frame->tf_err;
  764         type = frame->tf_trapno;
  765         sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);
  766 
  767         if (type <= MAX_TRAP_MSG)
  768                 msg = trap_msg[type];
  769         else
  770                 msg = "UNKNOWN";
  771         printf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
  772             frame->tf_eflags & PSL_VM ? "vm86" :
  773             ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
  774 #ifdef SMP
  775         /* two separate prints in case of a trap on an unmapped page */
  776         printf("cpuid = %d; ", PCPU_GET(cpuid));
  777         printf("apic id = %02x\n", PCPU_GET(apic_id));
  778 #endif
  779         if (type == T_PAGEFLT) {
  780                 printf("fault virtual address   = 0x%x\n", eva);
  781                 printf("fault code              = %s %s, %s\n",
  782                         code & PGEX_U ? "user" : "supervisor",
  783                         code & PGEX_W ? "write" : "read",
  784                         code & PGEX_P ? "protection violation" : "page not present");
  785         }
  786         printf("instruction pointer     = 0x%x:0x%x\n",
  787                frame->tf_cs & 0xffff, frame->tf_eip);
  788         if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
  789                 ss = frame->tf_ss & 0xffff;
  790                 esp = frame->tf_esp;
  791         } else {
  792                 ss = GSEL(GDATA_SEL, SEL_KPL);
  793                 esp = (int)&frame->tf_esp;
  794         }
  795         printf("stack pointer           = 0x%x:0x%x\n", ss, esp);
  796         printf("frame pointer           = 0x%x:0x%x\n", ss, frame->tf_ebp);
  797         printf("code segment            = base 0x%x, limit 0x%x, type 0x%x\n",
  798                softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
  799         printf("                        = DPL %d, pres %d, def32 %d, gran %d\n",
  800                softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
  801                softseg.ssd_gran);
  802         printf("processor eflags        = ");
  803         if (frame->tf_eflags & PSL_T)
  804                 printf("trace trap, ");
  805         if (frame->tf_eflags & PSL_I)
  806                 printf("interrupt enabled, ");
  807         if (frame->tf_eflags & PSL_NT)
  808                 printf("nested task, ");
  809         if (frame->tf_eflags & PSL_RF)
  810                 printf("resume, ");
  811         if (frame->tf_eflags & PSL_VM)
  812                 printf("vm86, ");
  813         printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
  814         printf("current process         = ");
  815         if (curproc) {
  816                 printf("%lu (%s)\n",
  817                     (u_long)curproc->p_pid, curproc->p_comm ?
  818                     curproc->p_comm : "");
  819         } else {
  820                 printf("Idle\n");
  821         }
  822 
  823 #ifdef KDB
  824         if (debugger_on_panic || kdb_active) {
  825                 register_t eflags;
  826                 eflags = intr_disable();
  827                 if (kdb_trap(type, 0, frame)) {
  828                         intr_restore(eflags);
  829                         return;
  830                 }
  831                 intr_restore(eflags);
  832         }
  833 #endif
  834         printf("trap number             = %d\n", type);
  835         if (type <= MAX_TRAP_MSG)
  836                 panic("%s", trap_msg[type]);
  837         else
  838                 panic("unknown/reserved trap");
  839 }
  840 
  841 /*
  842  * Double fault handler. Called when a fault occurs while writing
  843  * a frame for a trap/exception onto the stack. This usually occurs
  844  * when the stack overflows (such is the case with infinite recursion,
  845  * for example).
  846  *
  847  * XXX Note that the current PTD gets replaced by IdlePTD when the
  848  * task switch occurs. This means that the stack that was active at
  849  * the time of the double fault is not available at <kstack> unless
  850  * the machine was idle when the double fault occurred. The downside
  851  * of this is that "trace <ebp>" in ddb won't work.
  852  */
  853 void
  854 dblfault_handler()
  855 {
  856         printf("\nFatal double fault:\n");
  857         printf("eip = 0x%x\n", PCPU_GET(common_tss.tss_eip));
  858         printf("esp = 0x%x\n", PCPU_GET(common_tss.tss_esp));
  859         printf("ebp = 0x%x\n", PCPU_GET(common_tss.tss_ebp));
  860 #ifdef SMP
  861         /* two separate prints in case of a trap on an unmapped page */
  862         printf("cpuid = %d; ", PCPU_GET(cpuid));
  863         printf("apic id = %02x\n", PCPU_GET(apic_id));
  864 #endif
  865         panic("double fault");
  866 }
  867 
  868 /*
  869  *      syscall -       system call request C handler
  870  *
  871  *      A system call is essentially treated as a trap.
  872  */
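      /*
       * Editor's note (illustrative, not part of the original file): a minimal
       * sketch of how a libc system call stub reaches this handler on i386,
       * assuming the usual int $0x80 convention (SYS_xxx is a placeholder):
       *
       *      sys_xxx:                        # caller has pushed the arguments
       *              movl    $SYS_xxx, %eax  # system call number
       *              int     $0x80           # trap into the kernel
       *              jc      error           # carry set: %eax holds an errno
       *              ret
       *
       * Because the stub is entered with a normal call, the user stack holds a
       * return address followed by the arguments.  That is why the code below
       * takes the number from tf_eax, starts reading arguments sizeof(int)
       * past tf_esp, and reports failure by setting PSL_C with the error code
       * left in tf_eax.
       */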
  873 void
  874 syscall(frame)
  875         struct trapframe frame;
  876 {
  877         caddr_t params;
  878         struct sysent *callp;
  879         struct thread *td = curthread;
  880         struct proc *p = td->td_proc;
  881         register_t orig_tf_eflags;
  882         u_int sticks;
  883         int error;
  884         int narg;
  885         int args[8];
  886         u_int code;
  887 
  888         /*
  889          * note: PCPU_LAZY_INC() can only be used if we can afford
  890          * occasional inaccuracy in the count.
  891          */
  892         PCPU_LAZY_INC(cnt.v_syscall);
  893 
  894 #ifdef DIAGNOSTIC
  895         if (ISPL(frame.tf_cs) != SEL_UPL) {
  896                 mtx_lock(&Giant);       /* try to stabilize the system XXX */
  897                 panic("syscall");
  898                 /* NOT REACHED */
  899                 mtx_unlock(&Giant);
  900         }
  901 #endif
  902 
  903         sticks = td->td_sticks;
  904         td->td_frame = &frame;
  905         if (td->td_ucred != p->p_ucred) 
  906                 cred_update_thread(td);
  907         if (p->p_flag & P_SA)
  908                 thread_user_enter(td);
  909         params = (caddr_t)frame.tf_esp + sizeof(int);
  910         code = frame.tf_eax;
  911         orig_tf_eflags = frame.tf_eflags;
  912 
  913         if (p->p_sysent->sv_prepsyscall) {
  914                 /*
  915                  * The prep code is MP aware.
  916                  */
  917                 (*p->p_sysent->sv_prepsyscall)(&frame, args, &code, &params);
  918         } else {
  919                 /*
  920                  * Need to check if this is a 32 bit or 64 bit syscall.
  921                  * fuword is MP aware.
  922                  */
  923                 if (code == SYS_syscall) {
  924                         /*
  925                          * Code is first argument, followed by actual args.
  926                          */
  927                         code = fuword(params);
  928                         params += sizeof(int);
  929                 } else if (code == SYS___syscall) {
  930                         /*
  931                          * Like syscall, but code is a quad, so as to maintain
  932                          * quad alignment for the rest of the arguments.
  933                          */
  934                         code = fuword(params);
  935                         params += sizeof(quad_t);
  936                 }
  937         }
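              /*
               * Editor's note (not part of the original file): the indirection
               * above is what backs the syscall(2) and __syscall(2) wrappers.
               * For example, syscall(SYS_getpid) traps with %eax equal to
               * SYS_syscall and SYS_getpid as the first stack argument, so the
               * fuword() above fetches the real code from the user stack and
               * advances params past it before the remaining arguments are
               * copied in below.
               */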
  938 
  939         if (p->p_sysent->sv_mask)
  940                 code &= p->p_sysent->sv_mask;
  941 
  942         if (code >= p->p_sysent->sv_size)
  943                 callp = &p->p_sysent->sv_table[0];
  944         else
  945                 callp = &p->p_sysent->sv_table[code];
  946 
  947         narg = callp->sy_narg & SYF_ARGMASK;
  948 
  949         /*
  950          * copyin and the ktrsyscall()/ktrsysret() code is MP-aware
  951          */
  952         if (params != NULL && narg != 0)
  953                 error = copyin(params, (caddr_t)args,
  954                     (u_int)(narg * sizeof(int)));
  955         else
  956                 error = 0;
  957                 
  958 #ifdef KTRACE
  959         if (KTRPOINT(td, KTR_SYSCALL))
  960                 ktrsyscall(code, narg, args);
  961 #endif
  962 
  963         CTR4(KTR_SYSC, "syscall enter thread %p pid %d proc %s code %d", td,
  964             td->td_proc->p_pid, td->td_proc->p_comm, code);
  965 
  966         /*
  967          * Try to run the syscall without Giant if the syscall
  968          * is MP safe.
  969          */
  970         if ((callp->sy_narg & SYF_MPSAFE) == 0)
  971                 mtx_lock(&Giant);
  972 
  973         if (error == 0) {
  974                 td->td_retval[0] = 0;
  975                 td->td_retval[1] = frame.tf_edx;
  976 
  977                 STOPEVENT(p, S_SCE, narg);
  978 
  979                 PTRACESTOP_SC(p, td, S_PT_SCE);
  980 
  981                 error = (*callp->sy_call)(td, args);
  982         }
  983 
  984         switch (error) {
  985         case 0:
  986                 frame.tf_eax = td->td_retval[0];
  987                 frame.tf_edx = td->td_retval[1];
  988                 frame.tf_eflags &= ~PSL_C;
  989                 break;
  990 
  991         case ERESTART:
  992                 /*
  993                  * Reconstruct pc, assuming lcall $X,y is 7 bytes,
  994                  * int 0x80 is 2 bytes. We saved this in tf_err.
  995                  */
  996                 frame.tf_eip -= frame.tf_err;
  997                 break;
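                      /*
                       * Editor's note (not part of the original file): a worked
                       * example of the adjustment above.  If the process entered
                       * via "int $0x80" at address A, the CPU saved tf_eip as
                       * A + 2 and the entry path recorded the instruction length
                       * (2) in tf_err, so tf_eip -= tf_err leaves eip back at A
                       * and the call is re-issued when the process resumes.
                       */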
  998 
  999         case EJUSTRETURN:
 1000                 break;
 1001 
 1002         default:
 1003                 if (p->p_sysent->sv_errsize) {
 1004                         if (error >= p->p_sysent->sv_errsize)
 1005                                 error = -1;     /* XXX */
 1006                         else
 1007                                 error = p->p_sysent->sv_errtbl[error];
 1008                 }
 1009                 frame.tf_eax = error;
 1010                 frame.tf_eflags |= PSL_C;
 1011                 break;
 1012         }
 1013 
 1014         /*
 1015          * Release Giant if we previously set it.
 1016          */
 1017         if ((callp->sy_narg & SYF_MPSAFE) == 0)
 1018                 mtx_unlock(&Giant);
 1019 
 1020         /*
 1021          * Traced syscall.
 1022          */
 1023         if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
 1024                 frame.tf_eflags &= ~PSL_T;
 1025                 trapsignal(td, SIGTRAP, 0);
 1026         }
 1027 
 1028         /*
 1029          * Handle reschedule and other end-of-syscall issues
 1030          */
 1031         userret(td, &frame, sticks);
 1032 
 1033         CTR4(KTR_SYSC, "syscall exit thread %p pid %d proc %s code %d", td,
 1034             td->td_proc->p_pid, td->td_proc->p_comm, code);
 1035 
 1036 #ifdef KTRACE
 1037         if (KTRPOINT(td, KTR_SYSRET))
 1038                 ktrsysret(code, error, td->td_retval[0]);
 1039 #endif
 1040 
 1041         /*
 1042          * This works because errno is findable through the
 1043          * register set.  If we ever support an emulation where this
 1044          * is not the case, this code will need to be revisited.
 1045          */
 1046         STOPEVENT(p, S_SCX, code);
 1047 
 1048         PTRACESTOP_SC(p, td, S_PT_SCX);
 1049 
 1050         WITNESS_WARN(WARN_PANIC, NULL, "System call %s returning",
 1051             (code >= 0 && code < SYS_MAXSYSCALL) ? syscallnames[code] : "???");
 1052         mtx_assert(&sched_lock, MA_NOTOWNED);
 1053         mtx_assert(&Giant, MA_NOTOWNED);
 1054 }
 1055 
