
FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/trap.c


    1 /*-
    2  * Copyright (C) 1994, David Greenman
    3  * Copyright (c) 1990, 1993
    4  *      The Regents of the University of California.  All rights reserved.
    5  *
    6  * This code is derived from software contributed to Berkeley by
    7  * the University of Utah, and William Jolitz.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *      This product includes software developed by the University of
   20  *      California, Berkeley and its contributors.
   21  * 4. Neither the name of the University nor the names of its contributors
   22  *    may be used to endorse or promote products derived from this software
   23  *    without specific prior written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   35  * SUCH DAMAGE.
   36  *
   37  *      from: @(#)trap.c        7.4 (Berkeley) 5/13/91
   38  * $FreeBSD$
   39  */
   40 
   41 /*
   42  * 386 Trap and System call handling
   43  */
   44 
   45 #include "opt_cpu.h"
   46 #include "opt_ddb.h"
   47 #include "opt_ktrace.h"
   48 #include "opt_trap.h"
   49 #include "opt_vm86.h"
   50 
   51 #include <sys/param.h>
   52 #include <sys/systm.h>
   53 #include <sys/proc.h>
   54 #include <sys/pioctl.h>
   55 #include <sys/kernel.h>
   56 #include <sys/resourcevar.h>
   57 #include <sys/signalvar.h>
   58 #include <sys/syscall.h>
   59 #include <sys/sysent.h>
   60 #include <sys/uio.h>
   61 #include <sys/vmmeter.h>
   62 #ifdef KTRACE
   63 #include <sys/ktrace.h>
   64 #endif
   65 
   66 #include <vm/vm.h>
   67 #include <vm/vm_param.h>
   68 #include <vm/vm_prot.h>
   69 #include <sys/lock.h>
   70 #include <vm/pmap.h>
   71 #include <vm/vm_kern.h>
   72 #include <vm/vm_map.h>
   73 #include <vm/vm_page.h>
   74 #include <vm/vm_extern.h>
   75 
   76 #include <machine/cpu.h>
   77 #include <machine/ipl.h>
   78 #include <machine/md_var.h>
   79 #include <machine/pcb.h>
   80 #ifdef SMP
   81 #include <machine/smp.h>
   82 #endif
   83 #include <machine/tss.h>
   84 
   85 #include <i386/isa/intr_machdep.h>
   86 
   87 #ifdef POWERFAIL_NMI
   88 #include <sys/syslog.h>
   89 #include <machine/clock.h>
   90 #endif
   91 
   92 #ifdef VM86
   93 #include <machine/vm86.h>
   94 #endif
   95 
   96 #ifdef DDB
   97         extern int in_Debugger, debugger_on_panic;
   98 #endif
   99 
  100 #include "isa.h"
  101 #include "npx.h"
  102 
  103 extern struct i386tss common_tss;
  104 
  105 int (*pmath_emulate) __P((struct trapframe *));
  106 
  107 extern void trap __P((struct trapframe frame));
  108 extern int trapwrite __P((unsigned addr));
  109 extern void syscall __P((struct trapframe frame));
  110 
  111 static int trap_pfault __P((struct trapframe *, int, vm_offset_t));
  112 static void trap_fatal __P((struct trapframe *, vm_offset_t));
  113 void dblfault_handler __P((void));
  114 
  115 extern inthand_t IDTVEC(syscall);
  116 
  117 #define MAX_TRAP_MSG            28
  118 static char *trap_msg[] = {
  119         "",                                     /*  0 unused */
  120         "privileged instruction fault",         /*  1 T_PRIVINFLT */
  121         "",                                     /*  2 unused */
  122         "breakpoint instruction fault",         /*  3 T_BPTFLT */
  123         "",                                     /*  4 unused */
  124         "",                                     /*  5 unused */
  125         "arithmetic trap",                      /*  6 T_ARITHTRAP */
  126         "system forced exception",              /*  7 T_ASTFLT */
  127         "",                                     /*  8 unused */
  128         "general protection fault",             /*  9 T_PROTFLT */
  129         "trace trap",                           /* 10 T_TRCTRAP */
  130         "",                                     /* 11 unused */
  131         "page fault",                           /* 12 T_PAGEFLT */
  132         "",                                     /* 13 unused */
  133         "alignment fault",                      /* 14 T_ALIGNFLT */
  134         "",                                     /* 15 unused */
  135         "",                                     /* 16 unused */
  136         "",                                     /* 17 unused */
  137         "integer divide fault",                 /* 18 T_DIVIDE */
  138         "non-maskable interrupt trap",          /* 19 T_NMI */
  139         "overflow trap",                        /* 20 T_OFLOW */
  140         "FPU bounds check fault",               /* 21 T_BOUND */
  141         "FPU device not available",             /* 22 T_DNA */
  142         "double fault",                         /* 23 T_DOUBLEFLT */
  143         "FPU operand fetch fault",              /* 24 T_FPOPFLT */
  144         "invalid TSS fault",                    /* 25 T_TSSFLT */
  145         "segment not present fault",            /* 26 T_SEGNPFLT */
  146         "stack fault",                          /* 27 T_STKFLT */
  147         "machine check trap",                   /* 28 T_MCHK */
  148 };
  149 
  150 static __inline void userret __P((struct proc *p, struct trapframe *frame,
  151                                   u_quad_t oticks));
  152 
  153 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
  154 extern struct gate_descriptor *t_idt;
  155 extern int has_f00f_bug;
  156 #endif
  157 
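       /*
        * userret() runs on every return to user mode (from trap(),
        * syscall(), and fork_return() below).  It delivers any pending
        * signals, performs a context switch if one was requested while
        * we were in the kernel, and charges profiling time accumulated
        * during the trap or system call.
        */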
  158 static __inline void
  159 userret(p, frame, oticks)
  160         struct proc *p;
  161         struct trapframe *frame;
  162         u_quad_t oticks;
  163 {
  164         int sig, s;
  165 
  166         while ((sig = CURSIG(p)) != 0)
  167                 postsig(sig);
  168 
  169 #if 0
  170         if (!want_resched &&
  171                 (p->p_priority <= p->p_usrpri) &&
  172                 (p->p_rtprio.type == RTP_PRIO_NORMAL)) {
  173                  int newpriority;
  174                  p->p_estcpu += 1;
  175                  newpriority = PUSER + p->p_estcpu / 4 + 2 * p->p_nice;
  176                  newpriority = min(newpriority, MAXPRI);
  177                  p->p_usrpri = newpriority;
  178         }
  179 #endif
  180                 
  181         p->p_priority = p->p_usrpri;
  182         if (want_resched) {
  183                 /*
  184                  * Since we are curproc, clock will normally just change
  185                  * our priority without moving us from one queue to another
  186                  * (since the running process is not on a queue.)
  187                  * If that happened after we setrunqueue ourselves but before we
  188                  * mi_switch()'ed, we might not be on the queue indicated by
  189                  * our priority.
  190                  */
  191                 s = splhigh();
  192                 setrunqueue(p);
  193                 p->p_stats->p_ru.ru_nivcsw++;
  194                 mi_switch();
  195                 splx(s);
  196                 while ((sig = CURSIG(p)) != 0)
  197                         postsig(sig);
  198         }
  199         /*
  200          * Charge system time if profiling.
  201          */
  202         if (p->p_flag & P_PROFIL)
  203                 addupc_task(p, frame->tf_eip,
  204                             (u_int)(p->p_sticks - oticks) * psratio);
  205 
  206         curpriority = p->p_priority;
  207 }
  208 
  209 /*
  210  * Exception, fault, and trap interface to the FreeBSD kernel.
  211  * This common code is called from assembly language IDT gate entry
  212  * routines that prepare a suitable stack frame, and restore this
  213  * frame after the exception has been processed.
  214  */
  215 
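       /*
        * For illustration only, an IDT entry stub in exception.s does
        * roughly the following before control reaches trap(): push a
        * (possibly dummy) error code and the trap number, save the
        * segment and general registers to build the struct trapframe,
        * load the kernel data segments, call trap(), and finally return
        * to user or kernel mode through doreti.  The exact sequence is
        * the TRAP()/alltraps code in exception.s; this is a sketch.
        */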
  216 void
  217 trap(frame)
  218         struct trapframe frame;
  219 {
  220         struct proc *p = curproc;
  221         u_quad_t sticks = 0;
  222         int i = 0, ucode = 0, type, code;
  223         vm_offset_t eva;
  224 
  225         if (!(frame.tf_eflags & PSL_I)) {
  226                 /*
  227                  * Buggy application or kernel code has disabled interrupts
  228                  * and then trapped.  Enabling interrupts now is wrong, but
  229                  * it is better than running with interrupts disabled until
  230                  * they are accidentally enabled later.
  231                  */
  232                 type = frame.tf_trapno;
  233                 if (ISPL(frame.tf_cs) == SEL_UPL || (frame.tf_eflags & PSL_VM))
  234                         printf(
  235                             "pid %ld (%s): trap %d with interrupts disabled\n",
  236                             (long)curproc->p_pid, curproc->p_comm, type);
  237                 else if (type != T_BPTFLT && type != T_TRCTRAP)
  238                         /*
  239                          * XXX not quite right, since this may be for a
  240                          * multiple fault in user mode.
  241                          */
  242                         printf("kernel trap %d with interrupts disabled\n",
  243                             type);
  244                 enable_intr();
  245         }
  246 
  247         eva = 0;
  248         if (frame.tf_trapno == T_PAGEFLT) {
  249                 /*
  250                  * For some Cyrix CPUs, %cr2 is clobbered by interrupts.
  251                  * This problem is worked around by using an interrupt
  252                  * gate for the pagefault handler.  We are finally ready
  253                  * to read %cr2 and then must reenable interrupts.
  254                  *
  255                  * XXX this should be in the switch statement, but the
   256                  * NO_F00F_HACK and VM86 goto and ifdefs obfuscate the
  257                  * flow of control too much for this to be obviously
  258                  * correct.
  259                  */
  260                 eva = rcr2();
  261                 enable_intr();
  262         }
  263 
  264 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
  265 restart:
  266 #endif
  267         type = frame.tf_trapno;
  268         code = frame.tf_err;
  269 
  270 #ifdef VM86
  271         if (in_vm86call) {
  272                 if (frame.tf_eflags & PSL_VM &&
  273                     (type == T_PROTFLT || type == T_STKFLT)) {
  274                         i = vm86_emulate((struct vm86frame *)&frame);
  275                         if (i != 0)
  276                                 /*
  277                                  * returns to original process
  278                                  */
  279                                 vm86_trap((struct vm86frame *)&frame);
  280                         return;
  281                 }
  282                 switch (type) {
  283                         /*
  284                          * these traps want either a process context, or
  285                          * assume a normal userspace trap.
  286                          */
  287                 case T_PROTFLT:
  288                 case T_SEGNPFLT:
  289                         trap_fatal(&frame, eva);
  290                         return;
  291                 case T_TRCTRAP:
  292                         type = T_BPTFLT;        /* kernel breakpoint */
  293                         /* FALL THROUGH */
  294                 }
  295                 goto kernel_trap;       /* normal kernel trap handling */
  296         }
  297 #endif
  298 
  299         if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) {
  300                 /* user trap */
  301 
  302                 sticks = p->p_sticks;
  303                 p->p_md.md_regs = &frame;
  304 
  305                 switch (type) {
  306                 case T_PRIVINFLT:       /* privileged instruction fault */
  307                         ucode = type;
  308                         i = SIGILL;
  309                         break;
  310 
  311                 case T_BPTFLT:          /* bpt instruction fault */
  312                 case T_TRCTRAP:         /* trace trap */
  313                         frame.tf_eflags &= ~PSL_T;
  314                         i = SIGTRAP;
  315                         break;
  316 
  317                 case T_ARITHTRAP:       /* arithmetic trap */
  318                         ucode = code;
  319                         i = SIGFPE;
  320                         break;
  321 
  322                 case T_ASTFLT:          /* Allow process switch */
  323                         astoff();
  324                         cnt.v_soft++;
  325                         if (p->p_flag & P_OWEUPC) {
  326                                 p->p_flag &= ~P_OWEUPC;
  327                                 addupc_task(p, p->p_stats->p_prof.pr_addr,
  328                                             p->p_stats->p_prof.pr_ticks);
  329                         }
  330                         goto out;
  331 
  332                         /*
  333                          * The following two traps can happen in
  334                          * vm86 mode, and, if so, we want to handle
  335                          * them specially.
  336                          */
  337                 case T_PROTFLT:         /* general protection fault */
  338                 case T_STKFLT:          /* stack fault */
  339 #ifdef VM86
  340                         if (frame.tf_eflags & PSL_VM) {
  341                                 i = vm86_emulate((struct vm86frame *)&frame);
  342                                 if (i == 0)
  343                                         goto out;
  344                                 break;
  345                         }
  346 #endif /* VM86 */
  347                         /* FALL THROUGH */
  348 
  349                 case T_SEGNPFLT:        /* segment not present fault */
  350                 case T_TSSFLT:          /* invalid TSS fault */
  351                 case T_DOUBLEFLT:       /* double fault */
  352                 default:
  353                         ucode = code + BUS_SEGM_FAULT ;
  354                         i = SIGBUS;
  355                         break;
  356 
  357                 case T_PAGEFLT:         /* page fault */
  358                         i = trap_pfault(&frame, TRUE, eva);
  359                         if (i == -1)
  360                                 return;
  361 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
  362                         if (i == -2)
  363                                 goto restart;
  364 #endif
  365                         if (i == 0)
  366                                 goto out;
  367 
  368                         ucode = T_PAGEFLT;
  369                         break;
  370 
  371                 case T_DIVIDE:          /* integer divide fault */
  372                         ucode = FPE_INTDIV_TRAP;
  373                         i = SIGFPE;
  374                         break;
  375 
  376 #if NISA > 0
  377                 case T_NMI:
  378 #ifdef POWERFAIL_NMI
  379                         goto handle_powerfail;
  380 #else /* !POWERFAIL_NMI */
  381 #ifdef DDB
  382                         /* NMI can be hooked up to a pushbutton for debugging */
  383                         printf ("NMI ... going to debugger\n");
  384                         if (kdb_trap (type, 0, &frame))
  385                                 return;
  386 #endif /* DDB */
  387                         /* machine/parity/power fail/"kitchen sink" faults */
  388                         if (isa_nmi(code) == 0) return;
  389                         panic("NMI indicates hardware failure");
  390 #endif /* POWERFAIL_NMI */
  391 #endif /* NISA > 0 */
  392 
  393                 case T_OFLOW:           /* integer overflow fault */
  394                         ucode = FPE_INTOVF_TRAP;
  395                         i = SIGFPE;
  396                         break;
  397 
  398                 case T_BOUND:           /* bounds check fault */
  399                         ucode = FPE_SUBRNG_TRAP;
  400                         i = SIGFPE;
  401                         break;
  402 
  403                 case T_DNA:
  404 #if NNPX > 0
  405                         /* if a transparent fault (due to context switch "late") */
  406                         if (npxdna())
  407                                 return;
  408 #endif
  409                         if (!pmath_emulate) {
  410                                 i = SIGFPE;
  411                                 ucode = FPE_FPU_NP_TRAP;
  412                                 break;
  413                         }
  414                         i = (*pmath_emulate)(&frame);
  415                         if (i == 0) {
  416                                 if (!(frame.tf_eflags & PSL_T))
  417                                         return;
  418                                 frame.tf_eflags &= ~PSL_T;
  419                                 i = SIGTRAP;
  420                         }
  421                         /* else ucode = emulator_only_knows() XXX */
  422                         break;
  423 
  424                 case T_FPOPFLT:         /* FPU operand fetch fault */
  425                         ucode = T_FPOPFLT;
  426                         i = SIGILL;
  427                         break;
  428                 }
  429         } else {
  430 #ifdef VM86
  431 kernel_trap:
  432 #endif
  433                 /* kernel trap */
  434 
  435                 switch (type) {
  436                 case T_PAGEFLT:                 /* page fault */
  437                         (void) trap_pfault(&frame, FALSE, eva);
  438                         return;
  439 
  440                 case T_DNA:
  441 #if NNPX > 0
  442                         /*
  443                          * The kernel is apparently using npx for copying.
  444                          * XXX this should be fatal unless the kernel has
  445                          * registered such use.
  446                          */
  447                         if (npxdna())
  448                                 return;
  449 #endif
  450                         break;
  451 
  452                 case T_PROTFLT:         /* general protection fault */
  453                 case T_SEGNPFLT:        /* segment not present fault */
  454                         /*
  455                          * Invalid segment selectors and out of bounds
  456                          * %eip's and %esp's can be set up in user mode.
  457                          * This causes a fault in kernel mode when the
  458                          * kernel tries to return to user mode.  We want
  459                          * to get this fault so that we can fix the
  460                          * problem here and not have to check all the
  461                          * selectors and pointers when the user changes
  462                          * them.
  463                          */
  464 #define MAYBE_DORETI_FAULT(where, whereto)                              \
  465         do {                                                            \
  466                 if (frame.tf_eip == (int)where) {                       \
  467                         frame.tf_eip = (int)whereto;                    \
  468                         return;                                         \
  469                 }                                                       \
  470         } while (0)
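       /*
        * The doreti_iret_fault and doreti_popl_*_fault targets used
        * below are recovery stubs in exception.s.  Redirecting %eip to
        * them lets the aborted return-to-user path be handled as an
        * ordinary user-mode fault, so the offending process is
        * signalled instead of the kernel panicking.
        */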
  471 
  472                         if (intr_nesting_level == 0) {
  473                                 /*
  474                                  * Invalid %fs's and %gs's can be created using
  475                                  * procfs or PT_SETREGS or by invalidating the
  476                                  * underlying LDT entry.  This causes a fault
  477                                  * in kernel mode when the kernel attempts to
  478                                  * switch contexts.  Lose the bad context
  479                                  * (XXX) so that we can continue, and generate
  480                                  * a signal.
  481                                  */
  482                                 if (frame.tf_eip == (int)cpu_switch_load_fs) {
  483                                         curpcb->pcb_fs = 0;
  484                                         psignal(p, SIGBUS);
  485                                         return;
  486                                 }
  487                                 if (frame.tf_eip == (int)cpu_switch_load_gs) {
  488                                         curpcb->pcb_gs = 0;
  489                                         psignal(p, SIGBUS);
  490                                         return;
  491                                 }
  492                                 MAYBE_DORETI_FAULT(doreti_iret,
  493                                                    doreti_iret_fault);
  494                                 MAYBE_DORETI_FAULT(doreti_popl_ds,
  495                                                    doreti_popl_ds_fault);
  496                                 MAYBE_DORETI_FAULT(doreti_popl_es,
  497                                                    doreti_popl_es_fault);
  498                                 if (curpcb && curpcb->pcb_onfault) {
  499                                         frame.tf_eip = (int)curpcb->pcb_onfault;
  500                                         return;
  501                                 }
  502                         }
  503                         break;
  504 
  505                 case T_TSSFLT:
  506                         /*
  507                          * PSL_NT can be set in user mode and isn't cleared
  508                          * automatically when the kernel is entered.  This
  509                          * causes a TSS fault when the kernel attempts to
  510                          * `iret' because the TSS link is uninitialized.  We
  511                          * want to get this fault so that we can fix the
  512                          * problem here and not every time the kernel is
  513                          * entered.
  514                          */
  515                         if (frame.tf_eflags & PSL_NT) {
  516                                 frame.tf_eflags &= ~PSL_NT;
  517                                 return;
  518                         }
  519                         break;
  520 
  521                 case T_TRCTRAP:  /* trace trap */
  522                         if (frame.tf_eip == (int)IDTVEC(syscall)) {
  523                                 /*
  524                                  * We've just entered system mode via the
  525                                  * syscall lcall.  Continue single stepping
  526                                  * silently until the syscall handler has
  527                                  * saved the flags.
  528                                  */
  529                                 return;
  530                         }
  531                         if (frame.tf_eip == (int)IDTVEC(syscall) + 1) {
  532                                 /*
  533                                  * The syscall handler has now saved the
  534                                  * flags.  Stop single stepping it.
  535                                  */
  536                                 frame.tf_eflags &= ~PSL_T;
  537                                 return;
  538                         }
  539                         /*
  540                          * Fall through.
  541                          */
  542                 case T_BPTFLT:
  543                         /*
  544                          * If DDB is enabled, let it handle the debugger trap.
  545                          * Otherwise, debugger traps "can't happen".
  546                          */
  547 #ifdef DDB
  548                         if (kdb_trap (type, 0, &frame))
  549                                 return;
  550 #endif
  551                         break;
  552 
  553 #if NISA > 0
  554                 case T_NMI:
  555 #ifdef POWERFAIL_NMI
  556 #ifndef TIMER_FREQ
  557 #  define TIMER_FREQ 1193182
  558 #endif
  559         handle_powerfail:
  560                 {
  561                   static unsigned lastalert = 0;
  562 
  563                   if(time_second - lastalert > 10)
  564                     {
  565                       log(LOG_WARNING, "NMI: power fail\n");
  566                       sysbeep(TIMER_FREQ/880, hz);
  567                       lastalert = time_second;
  568                     }
  569                   return;
  570                 }
  571 #else /* !POWERFAIL_NMI */
  572 #ifdef DDB
  573                         /* NMI can be hooked up to a pushbutton for debugging */
  574                         printf ("NMI ... going to debugger\n");
  575                         if (kdb_trap (type, 0, &frame))
  576                                 return;
  577 #endif /* DDB */
  578                         /* machine/parity/power fail/"kitchen sink" faults */
  579                         if (isa_nmi(code) == 0) return;
  580                         /* FALL THROUGH */
  581 #endif /* POWERFAIL_NMI */
  582 #endif /* NISA > 0 */
  583                 }
  584 
  585                 trap_fatal(&frame, eva);
  586                 return;
  587         }
  588 
  589         /* Translate fault for emulators (e.g. Linux) */
  590         if (*p->p_sysent->sv_transtrap)
  591                 i = (*p->p_sysent->sv_transtrap)(i, type);
  592 
  593         trapsignal(p, i, ucode);
  594 
  595 #ifdef DEBUG
  596         if (type <= MAX_TRAP_MSG) {
  597                 uprintf("fatal process exception: %s",
  598                         trap_msg[type]);
  599                 if ((type == T_PAGEFLT) || (type == T_PROTFLT))
  600                         uprintf(", fault VA = 0x%lx", (u_long)eva);
  601                 uprintf("\n");
  602         }
  603 #endif
  604 
  605 out:
  606         userret(p, &frame, sticks);
  607 }
  608 
  609 #ifdef notyet
  610 /*
  611  * This version doesn't allow a page fault to user space while
  612  * in the kernel. The rest of the kernel needs to be made "safe"
  613  * before this can be used. I think the only things remaining
  614  * to be made safe are the iBCS2 code and the process tracing/
  615  * debugging code.
  616  */
  617 static int
  618 trap_pfault(frame, usermode, eva)
  619         struct trapframe *frame;
  620         int usermode;
  621         vm_offset_t eva;
  622 {
  623         vm_offset_t va;
  624         struct vmspace *vm = NULL;
  625         vm_map_t map = 0;
  626         int rv = 0;
  627         vm_prot_t ftype;
  628         struct proc *p = curproc;
  629 
  630         if (frame->tf_err & PGEX_W)
  631                 ftype = VM_PROT_READ | VM_PROT_WRITE;
  632         else
  633                 ftype = VM_PROT_READ;
  634 
  635         va = trunc_page(eva);
  636         if (va < VM_MIN_KERNEL_ADDRESS) {
  637                 vm_offset_t v;
  638                 vm_page_t mpte;
  639 
  640                 if (p == NULL ||
  641                     (!usermode && va < VM_MAXUSER_ADDRESS &&
  642                      (intr_nesting_level != 0 || curpcb == NULL ||
  643                       curpcb->pcb_onfault == NULL))) {
  644                         trap_fatal(frame, eva);
  645                         return (-1);
  646                 }
  647 
  648                 /*
  649                  * This is a fault on non-kernel virtual memory.
  650                  * vm is initialized above to NULL. If curproc is NULL
  651                  * or curproc->p_vmspace is NULL the fault is fatal.
  652                  */
  653                 vm = p->p_vmspace;
  654                 if (vm == NULL)
  655                         goto nogo;
  656 
  657                 map = &vm->vm_map;
  658 
  659                 /*
  660                  * Keep swapout from messing with us during this
  661                  *      critical time.
  662                  */
  663                 ++p->p_lock;
  664 
  665                 /*
  666                  * Grow the stack if necessary
  667                  */
  668 #ifndef VM_STACK
  669                 if ((caddr_t)va > vm->vm_maxsaddr && va < USRSTACK) {
  670                         if (!grow(p, va)) {
  671                                 rv = KERN_FAILURE;
  672                                 --p->p_lock;
  673                                 goto nogo;
  674                         }
  675                 }
  676 
  677 #else
  678                 /* grow_stack returns false only if va falls into
  679                  * a growable stack region and the stack growth
  680                  * fails.  It returns true if va was not within
  681                  * a growable stack region, or if the stack 
  682                  * growth succeeded.
  683                  */
  684                 if (!grow_stack (p, va)) {
  685                         rv = KERN_FAILURE;
  686                         --p->p_lock;
  687                         goto nogo;
  688                 }
  689 #endif
  690                 
  691                 /* Fault in the user page: */
  692                 rv = vm_fault(map, va, ftype,
  693                         (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY : 0);
  694 
  695                 --p->p_lock;
  696         } else {
  697                 /*
  698                  * Don't allow user-mode faults in kernel address space.
  699                  */
  700                 if (usermode)
  701                         goto nogo;
  702 
  703                 /*
  704                  * Since we know that kernel virtual address addresses
  705                  * always have pte pages mapped, we just have to fault
  706                  * the page.
  707                  */
  708                 rv = vm_fault(kernel_map, va, ftype, FALSE);
  709         }
  710 
  711         if (rv == KERN_SUCCESS)
  712                 return (0);
  713 nogo:
  714         if (!usermode) {
  715                 if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
  716                         frame->tf_eip = (int)curpcb->pcb_onfault;
  717                         return (0);
  718                 }
  719                 trap_fatal(frame, eva);
  720                 return (-1);
  721         }
  722 
  723         /* kludge to pass faulting virtual address to sendsig */
  724         frame->tf_err = eva;
  725 
  726         return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
  727 }
  728 #endif
  729 
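       /*
        * trap_pfault() resolves a page fault: it selects either the
        * kernel map or the faulting process's vm_map, grows the user
        * stack when the fault address lies in the stack region, and
        * calls vm_fault().  It returns 0 on success, a signal number
        * (SIGBUS or SIGSEGV) for the caller to deliver, -1 if
        * trap_fatal() was invoked, or -2 to restart the trap after the
        * Pentium F00F workaround has rewritten the trap number.
        */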
  730 int
  731 trap_pfault(frame, usermode, eva)
  732         struct trapframe *frame;
  733         int usermode;
  734         vm_offset_t eva;
  735 {
  736         vm_offset_t va;
  737         struct vmspace *vm = NULL;
  738         vm_map_t map = 0;
  739         int rv = 0;
  740         vm_prot_t ftype;
  741         struct proc *p = curproc;
  742 
  743         va = trunc_page(eva);
  744         if (va >= KERNBASE) {
  745                 /*
  746                  * Don't allow user-mode faults in kernel address space.
  747                  * An exception:  if the faulting address is the invalid
  748                  * instruction entry in the IDT, then the Intel Pentium
  749                  * F00F bug workaround was triggered, and we need to
  750                  * treat it is as an illegal instruction, and not a page
  751                  * fault.
  752                  */
  753 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
  754                 if ((eva == (unsigned int)&t_idt[6]) && has_f00f_bug) {
  755                         frame->tf_trapno = T_PRIVINFLT;
  756                         return -2;
  757                 }
  758 #endif
  759                 if (usermode)
  760                         goto nogo;
  761 
  762                 map = kernel_map;
  763         } else {
  764                 /*
  765                  * This is a fault on non-kernel virtual memory.
  766                  * vm is initialized above to NULL. If curproc is NULL
  767                  * or curproc->p_vmspace is NULL the fault is fatal.
  768                  */
  769                 if (p != NULL)
  770                         vm = p->p_vmspace;
  771 
  772                 if (vm == NULL)
  773                         goto nogo;
  774 
  775                 map = &vm->vm_map;
  776         }
  777 
  778         if (frame->tf_err & PGEX_W)
  779                 ftype = VM_PROT_READ | VM_PROT_WRITE;
  780         else
  781                 ftype = VM_PROT_READ;
  782 
  783         if (map != kernel_map) {
  784                 /*
  785                  * Keep swapout from messing with us during this
  786                  *      critical time.
  787                  */
  788                 ++p->p_lock;
  789 
  790                 /*
  791                  * Grow the stack if necessary
  792                  */
  793 #ifndef VM_STACK
  794                 if ((caddr_t)va > vm->vm_maxsaddr && va < USRSTACK) {
  795                         if (!grow(p, va)) {
  796                                 rv = KERN_FAILURE;
  797                                 --p->p_lock;
  798                                 goto nogo;
  799                         }
  800                 }
  801 #else
  802                 /* grow_stack returns false only if va falls into
  803                  * a growable stack region and the stack growth
  804                  * fails.  It returns true if va was not within
  805                  * a growable stack region, or if the stack 
  806                  * growth succeeded.
  807                  */
  808                 if (!grow_stack (p, va)) {
  809                         rv = KERN_FAILURE;
  810                         --p->p_lock;
  811                         goto nogo;
  812                 }
  813 #endif
  814 
  815                 /* Fault in the user page: */
  816                 rv = vm_fault(map, va, ftype,
  817                         (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY : 0);
  818 
  819                 --p->p_lock;
  820         } else {
  821                 /*
  822                  * Don't have to worry about process locking or stacks in the kernel.
  823                  */
  824                 rv = vm_fault(map, va, ftype, FALSE);
  825         }
  826 
  827         if (rv == KERN_SUCCESS)
  828                 return (0);
  829 nogo:
  830         if (!usermode) {
  831                 if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
  832                         frame->tf_eip = (int)curpcb->pcb_onfault;
  833                         return (0);
  834                 }
  835                 trap_fatal(frame, eva);
  836                 return (-1);
  837         }
  838 
  839         /* kludge to pass faulting virtual address to sendsig */
  840         frame->tf_err = eva;
  841 
  842         return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
  843 }
  844 
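       /*
        * trap_fatal() is the last resort for unrecoverable traps: it
        * prints the trap number, faulting address, register and segment
        * state, and current interrupt mask, gives the kernel debugger a
        * chance to take over when one is configured, and then panics.
        */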
  845 static void
  846 trap_fatal(frame, eva)
  847         struct trapframe *frame;
  848         vm_offset_t eva;
  849 {
  850         int code, type, ss, esp;
  851         struct soft_segment_descriptor softseg;
  852 
  853         code = frame->tf_err;
  854         type = frame->tf_trapno;
  855         sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);
  856 
  857         if (type <= MAX_TRAP_MSG)
  858                 printf("\n\nFatal trap %d: %s while in %s mode\n",
  859                         type, trap_msg[type],
  860                         frame->tf_eflags & PSL_VM ? "vm86" :
  861                         ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
  862 #ifdef SMP
   863         /* three separate prints in case of a trap on an unmapped page */
  864         printf("mp_lock = %08x; ", mp_lock);
  865         printf("cpuid = %d; ", cpuid);
  866         printf("lapic.id = %08x\n", lapic.id);
  867 #endif
  868         if (type == T_PAGEFLT) {
  869                 printf("fault virtual address   = 0x%x\n", eva);
  870                 printf("fault code              = %s %s, %s\n",
  871                         code & PGEX_U ? "user" : "supervisor",
  872                         code & PGEX_W ? "write" : "read",
  873                         code & PGEX_P ? "protection violation" : "page not present");
  874         }
  875         printf("instruction pointer     = 0x%x:0x%x\n",
  876                frame->tf_cs & 0xffff, frame->tf_eip);
  877         if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
  878                 ss = frame->tf_ss & 0xffff;
  879                 esp = frame->tf_esp;
  880         } else {
  881                 ss = GSEL(GDATA_SEL, SEL_KPL);
  882                 esp = (int)&frame->tf_esp;
  883         }
  884         printf("stack pointer           = 0x%x:0x%x\n", ss, esp);
  885         printf("frame pointer           = 0x%x:0x%x\n", ss, frame->tf_ebp);
  886         printf("code segment            = base 0x%x, limit 0x%x, type 0x%x\n",
  887                softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
  888         printf("                        = DPL %d, pres %d, def32 %d, gran %d\n",
  889                softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
  890                softseg.ssd_gran);
  891         printf("processor eflags        = ");
  892         if (frame->tf_eflags & PSL_T)
  893                 printf("trace trap, ");
  894         if (frame->tf_eflags & PSL_I)
  895                 printf("interrupt enabled, ");
  896         if (frame->tf_eflags & PSL_NT)
  897                 printf("nested task, ");
  898         if (frame->tf_eflags & PSL_RF)
  899                 printf("resume, ");
  900         if (frame->tf_eflags & PSL_VM)
  901                 printf("vm86, ");
  902         printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
  903         printf("current process         = ");
  904         if (curproc) {
  905                 printf("%lu (%s)\n",
  906                     (u_long)curproc->p_pid, curproc->p_comm ?
  907                     curproc->p_comm : "");
  908         } else {
  909                 printf("Idle\n");
  910         }
  911         printf("interrupt mask          = ");
  912         if ((cpl & net_imask) == net_imask)
  913                 printf("net ");
  914         if ((cpl & tty_imask) == tty_imask)
  915                 printf("tty ");
  916         if ((cpl & bio_imask) == bio_imask)
  917                 printf("bio ");
  918         if ((cpl & cam_imask) == cam_imask)
  919                 printf("cam ");
  920         if (cpl == 0)
  921                 printf("none");
  922 #ifdef SMP
  923 /**
  924  *  XXX FIXME:
  925  *      we probably SHOULD have stopped the other CPUs before now!
  926  *      another CPU COULD have been touching cpl at this moment...
  927  */
  928         printf(" <- SMP: XXX");
  929 #endif
  930         printf("\n");
  931 
  932 #ifdef KDB
  933         if (kdb_trap(&psl))
  934                 return;
  935 #endif
  936 #ifdef DDB
  937         if ((debugger_on_panic || in_Debugger) && kdb_trap(type, 0, frame))
  938                 return;
  939 #endif
  940         printf("trap number             = %d\n", type);
  941         if (type <= MAX_TRAP_MSG)
  942                 panic(trap_msg[type]);
  943         else
  944                 panic("unknown/reserved trap");
  945 }
  946 
  947 /*
  948  * Double fault handler. Called when a fault occurs while writing
  949  * a frame for a trap/exception onto the stack. This usually occurs
  950  * when the stack overflows (such is the case with infinite recursion,
  951  * for example).
  952  *
  953  * XXX Note that the current PTD gets replaced by IdlePTD when the
  954  * task switch occurs. This means that the stack that was active at
  955  * the time of the double fault is not available at <kstack> unless
  956  * the machine was idle when the double fault occurred. The downside
  957  * of this is that "trace <ebp>" in ddb won't work.
  958  */
  959 void
  960 dblfault_handler()
  961 {
  962         printf("\nFatal double fault:\n");
  963         printf("eip = 0x%x\n", common_tss.tss_eip);
  964         printf("esp = 0x%x\n", common_tss.tss_esp);
  965         printf("ebp = 0x%x\n", common_tss.tss_ebp);
  966 #ifdef SMP
   967         /* three separate prints in case of a trap on an unmapped page */
  968         printf("mp_lock = %08x; ", mp_lock);
  969         printf("cpuid = %d; ", cpuid);
  970         printf("lapic.id = %08x\n", lapic.id);
  971 #endif
  972         panic("double fault");
  973 }
  974 
  975 /*
  976  * Compensate for 386 brain damage (missing URKR).
  977  * This is a little simpler than the pagefault handler in trap() because
   978  * the page tables have already been faulted in and high addresses
  979  * are thrown out early for other reasons.
  980  */
  981 int trapwrite(addr)
  982         unsigned addr;
  983 {
  984         struct proc *p;
  985         vm_offset_t va;
  986         struct vmspace *vm;
  987         int rv;
  988 
  989         va = trunc_page((vm_offset_t)addr);
  990         /*
  991          * XXX - MAX is END.  Changed > to >= for temp. fix.
  992          */
  993         if (va >= VM_MAXUSER_ADDRESS)
  994                 return (1);
  995 
  996         p = curproc;
  997         vm = p->p_vmspace;
  998 
  999         ++p->p_lock;
 1000 
 1001 #ifndef VM_STACK
 1002         if ((caddr_t)va >= vm->vm_maxsaddr && va < USRSTACK) {
 1003                 if (!grow(p, va)) {
 1004                         --p->p_lock;
 1005                         return (1);
 1006                 }
 1007         }
 1008 #else
 1009         if (!grow_stack (p, va)) {
 1010                 --p->p_lock;
 1011                 return (1);
 1012         }
 1013 #endif
 1014 
 1015         /*
 1016          * fault the data page
 1017          */
 1018         rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, VM_FAULT_DIRTY);
 1019 
 1020         --p->p_lock;
 1021 
 1022         if (rv != KERN_SUCCESS)
 1023                 return 1;
 1024 
 1025         return (0);
 1026 }
 1027 
 1028 /*
 1029  * System call request from POSIX system call gate interface to kernel.
 1030  * Like trap(), argument is call by reference.
 1031  */
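       /*
        * The general flow: fetch the system call number from %eax, copy
        * the arguments in from the user stack (or let the ABI's
        * sv_prepsyscall hook do it), dispatch through the process's
        * sysent table, and fold the result back into the trapframe,
        * with return values in %eax/%edx and the carry flag set on
        * error.
        */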
 1032 void
 1033 syscall(frame)
 1034         struct trapframe frame;
 1035 {
 1036         caddr_t params;
 1037         int i;
 1038         struct sysent *callp;
 1039         struct proc *p = curproc;
 1040         u_quad_t sticks;
 1041         int error;
 1042         int args[8];
 1043         u_int code;
 1044 
 1045 #ifdef DIAGNOSTIC
 1046         if (ISPL(frame.tf_cs) != SEL_UPL)
 1047                 panic("syscall");
 1048 #endif
 1049         sticks = p->p_sticks;
 1050         p->p_md.md_regs = &frame;
 1051         params = (caddr_t)frame.tf_esp + sizeof(int);
 1052         code = frame.tf_eax;
 1053         if (p->p_sysent->sv_prepsyscall) {
 1054                 (*p->p_sysent->sv_prepsyscall)(&frame, args, &code, &params);
 1055         } else {
 1056                 /*
 1057                  * Need to check if this is a 32 bit or 64 bit syscall.
 1058                  */
 1059                 if (code == SYS_syscall) {
 1060                         /*
 1061                          * Code is first argument, followed by actual args.
 1062                          */
 1063                         code = fuword(params);
 1064                         params += sizeof(int);
 1065                 } else if (code == SYS___syscall) {
 1066                         /*
 1067                          * Like syscall, but code is a quad, so as to maintain
 1068                          * quad alignment for the rest of the arguments.
 1069                          */
 1070                         code = fuword(params);
 1071                         params += sizeof(quad_t);
 1072                 }
 1073         }
 1074 
 1075         if (p->p_sysent->sv_mask)
 1076                 code &= p->p_sysent->sv_mask;
 1077 
 1078         if (code >= p->p_sysent->sv_size)
 1079                 callp = &p->p_sysent->sv_table[0];
 1080         else
 1081                 callp = &p->p_sysent->sv_table[code];
 1082 
 1083         if (params && (i = callp->sy_narg * sizeof(int)) &&
 1084             (error = copyin(params, (caddr_t)args, (u_int)i))) {
 1085 #ifdef KTRACE
 1086                 if (KTRPOINT(p, KTR_SYSCALL))
 1087                         ktrsyscall(p->p_tracep, code, callp->sy_narg, args);
 1088 #endif
 1089                 goto bad;
 1090         }
 1091 #ifdef KTRACE
 1092         if (KTRPOINT(p, KTR_SYSCALL))
 1093                 ktrsyscall(p->p_tracep, code, callp->sy_narg, args);
 1094 #endif
 1095         p->p_retval[0] = 0;
 1096         p->p_retval[1] = frame.tf_edx;
 1097 
 1098         STOPEVENT(p, S_SCE, callp->sy_narg);
 1099 
 1100         error = (*callp->sy_call)(p, args);
 1101 
 1102         switch (error) {
 1103 
 1104         case 0:
 1105                 /*
 1106                  * Reinitialize proc pointer `p' as it may be different
 1107                  * if this is a child returning from fork syscall.
 1108                  */
 1109                 p = curproc;
 1110                 frame.tf_eax = p->p_retval[0];
 1111                 frame.tf_edx = p->p_retval[1];
 1112                 frame.tf_eflags &= ~PSL_C;
 1113                 break;
 1114 
 1115         case ERESTART:
 1116                 /*
 1117                  * Reconstruct pc, assuming lcall $X,y is 7 bytes,
 1118                  * int 0x80 is 2 bytes. We saved this in tf_err.
 1119                  */
 1120                 frame.tf_eip -= frame.tf_err;
 1121                 break;
 1122 
 1123         case EJUSTRETURN:
 1124                 break;
 1125 
 1126         default:
 1127 bad:
 1128                 if (p->p_sysent->sv_errsize)
 1129                         if (error >= p->p_sysent->sv_errsize)
 1130                                 error = -1;     /* XXX */
 1131                         else
 1132                                 error = p->p_sysent->sv_errtbl[error];
 1133                 frame.tf_eax = error;
 1134                 frame.tf_eflags |= PSL_C;
 1135                 break;
 1136         }
 1137 
 1138         if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) {
 1139                 /* Traced syscall. */
 1140                 frame.tf_eflags &= ~PSL_T;
 1141                 trapsignal(p, SIGTRAP, 0);
 1142         }
 1143 
 1144         userret(p, &frame, sticks);
 1145 
 1146 #ifdef KTRACE
 1147         if (KTRPOINT(p, KTR_SYSRET))
 1148                 ktrsysret(p->p_tracep, code, error, p->p_retval[0]);
 1149 #endif
 1150 
 1151         /*
 1152          * This works because errno is findable through the
 1153          * register set.  If we ever support an emulation where this
 1154          * is not the case, this code will need to be revisited.
 1155          */
 1156         STOPEVENT(p, S_SCX, code);
 1157 
 1158 }
 1159 
 1160 /*
 1161  * Simplified back end of syscall(), used when returning from fork()
 1162  * directly into user mode.
 1163  */
 1164 void
 1165 fork_return(p, frame)
 1166         struct proc *p;
 1167         struct trapframe frame;
 1168 {
 1169         frame.tf_eax = 0;               /* Child returns zero */
 1170         frame.tf_eflags &= ~PSL_C;      /* success */
 1171         frame.tf_edx = 1;
 1172 
 1173         userret(p, &frame, 0);
 1174 #ifdef KTRACE
 1175         if (KTRPOINT(p, KTR_SYSRET))
 1176                 ktrsysret(p->p_tracep, SYS_fork, 0, 0);
 1177 #endif
 1178 }





This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.