FreeBSD/Linux Kernel Cross Reference
sys/osfmk/ppc/trap.c


    1 /*
    2  * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
    3  *
    4  * @APPLE_LICENSE_HEADER_START@
    5  * 
    6  * The contents of this file constitute Original Code as defined in and
    7  * are subject to the Apple Public Source License Version 1.1 (the
    8  * "License").  You may not use this file except in compliance with the
    9  * License.  Please obtain a copy of the License at
   10  * http://www.apple.com/publicsource and read it before using this file.
   11  * 
   12  * This Original Code and all software distributed under the License are
   13  * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   14  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   15  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   16  * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
   17  * License for the specific language governing rights and limitations
   18  * under the License.
   19  * 
   20  * @APPLE_LICENSE_HEADER_END@
   21  */
   22 /*
   23  * @OSF_COPYRIGHT@
   24  */
   25 
   26 #include <mach_kdb.h>
   27 #include <mach_kdp.h>
   28 #include <debug.h>
   29 
   30 #include <mach/mach_types.h>
   31 #include <mach/mach_traps.h>
   32 #include <mach/thread_status.h>
   33 
   34 #include <kern/processor.h>
   35 #include <kern/thread.h>
   36 #include <kern/exception.h>
   37 #include <kern/syscall_sw.h>
   38 #include <kern/cpu_data.h>
   39 #include <kern/debug.h>
   40 
   41 #include <vm/vm_fault.h>
   42 #include <vm/vm_kern.h>         /* For kernel_map */
   43 
   44 #include <ppc/misc_protos.h>
   45 #include <ppc/trap.h>
   46 #include <ppc/exception.h>
   47 #include <ppc/proc_reg.h>       /* for SR_xxx definitions */
   48 #include <ppc/pmap.h>
   49 #include <ppc/mem.h>
   50 #include <ppc/mappings.h>
   51 #include <ppc/Firmware.h>
   52 #include <ppc/low_trace.h>
   53 #include <ppc/Diagnostics.h>
   54 #include <ppc/hw_perfmon.h>
   55 
   56 #include <sys/kdebug.h>
   57 
   58 perfCallback perfTrapHook = 0; /* Pointer to CHUD trap hook routine */
   59 perfCallback perfASTHook = 0;  /* Pointer to CHUD AST hook routine */
   60 
   61 #if     MACH_KDB
   62 #include <ddb/db_watch.h>
   63 #include <ddb/db_run.h>
   64 #include <ddb/db_break.h>
   65 #include <ddb/db_trap.h>
   66 
   67 boolean_t let_ddb_vm_fault = FALSE;
   68 boolean_t       debug_all_traps_with_kdb = FALSE;
   69 extern struct db_watchpoint *db_watchpoint_list;
   70 extern boolean_t db_watchpoints_inserted;
   71 extern boolean_t db_breakpoints_inserted;
   72 
   73 
   74 
   75 #endif  /* MACH_KDB */
   76 
   77 extern task_t bsd_init_task;
   78 extern char init_task_failure_data[];
   79 extern int not_in_kdp;
   80 
   81 #define PROT_EXEC       (VM_PROT_EXECUTE)
   82 #define PROT_RO         (VM_PROT_READ)
   83 #define PROT_RW         (VM_PROT_READ|VM_PROT_WRITE)
   84 
   85 /* A useful macro to update the ppc_exception_state in the PCB
   86  * before calling doexception
   87  */
   88 #define UPDATE_PPC_EXCEPTION_STATE {                                                    \
   89         thread_t _thread = current_thread();                                                    \
   90         _thread->machine.pcb->save_dar = (uint64_t)dar;                                 \
   91         _thread->machine.pcb->save_dsisr = dsisr;                                               \
   92         _thread->machine.pcb->save_exception = trapno / T_VECTOR_SIZE;  /* back to powerpc */ \
   93 }
   94 
   95 void unresolved_kernel_trap(int trapno,
   96                                    struct savearea *ssp,
   97                                    unsigned int dsisr,
   98                                    addr64_t dar,
   99                                    const char *message);
  100 
  101 static void handleMck(struct savearea *ssp);            /* Common machine check handler */
  102 
  103 #ifdef MACH_BSD
  104 extern void get_procrustime(time_value_t *);
  105 extern void bsd_uprofil(time_value_t *, user_addr_t);
  106 #endif /* MACH_BSD */
  107 
  108 
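       /*
        * trap() is the common C entry point for synchronous exceptions, reached
        * from the low-level handlers with the trap number, the savearea holding
        * the interrupted state, and the DSISR/DAR fault registers.  It dispatches
        * kernel-mode and user-mode traps separately, resolves page faults through
        * vm_fault(), raises Mach exceptions via doexception() for user errors,
        * delivers any pending ASTs before returning to user mode, and hands back
        * the savearea to be restored.
        */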
  109 struct savearea *trap(int trapno,
  110                              struct savearea *ssp,
  111                              unsigned int dsisr,
  112                              addr64_t dar)
  113 {
  114         int exception;
  115         int code;
  116         int subcode;
  117         vm_map_t map;
   118         unsigned int sp;
  119         unsigned int space, space2;
  120         vm_map_offset_t offset;
  121         thread_t thread = current_thread();
  122         boolean_t intr;
  123         ast_t *myast;
  124         
  125 #ifdef MACH_BSD
  126         time_value_t tv;
  127 #endif /* MACH_BSD */
  128 
  129         myast = ast_pending();
  130         if(perfASTHook) {
  131                 if(*myast & AST_PPC_CHUD_ALL) {
  132                         perfASTHook(trapno, ssp, dsisr, (unsigned int)dar);
  133                 }
  134         } else {
  135                 *myast &= ~AST_PPC_CHUD_ALL;
  136         }
  137 
  138         if(perfTrapHook) {                                                      /* Is there a hook? */
  139                 if(perfTrapHook(trapno, ssp, dsisr, (unsigned int)dar) == KERN_SUCCESS) return ssp;     /* If it succeeds, we are done... */
  140         }
  141 
  142 #if 0
  143         {
   144                 extern void fctx_test(void);
  145                 fctx_test();
  146         }
  147 #endif
  148 
  149         exception = 0;                                                          /* Clear exception for now */
  150 
   151 /*
   152  *      Remember that we are disabled for interruptions when we come in here.  Because
   153  *      of latency concerns, we need to re-enable interruptions as soon as we can if the
   154  *      interrupted context had them enabled itself.
   155  */
  156 
  157         intr = (ssp->save_srr1 & MASK(MSR_EE)) != 0;    /* Remember if we were enabled */
  158 
  159         /* Handle kernel traps first */
  160 
  161         if (!USER_MODE(ssp->save_srr1)) {
  162                 /*
  163                  * Trap came from kernel
  164                  */
  165                 switch (trapno) {
  166 
  167                 case T_PREEMPT:                 /* Handle a preempt trap */
  168                         ast_taken(AST_PREEMPTION, FALSE);
  169                         break;  
  170 
  171                 case T_PERF_MON:
  172                         perfmon_handle_pmi(ssp);
  173                         break;
  174 
  175                 case T_RESET:                                   /* Reset interruption */
  176                         if (!Call_Debugger(trapno, ssp))
  177                                 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
  178                         break;                                          /* We just ignore these */
  179                 
   180                 /*
   181                  * These trap types should never be seen by trap()
   182                  * in kernel mode anyway.
   183                  * Some are interrupts that should be seen by
   184                  * interrupt(); others just don't happen because they
   185                  * are handled elsewhere.  Some could happen but are
   186                  * considered to be fatal in kernel mode.
   187                  */
  188                 case T_DECREMENTER:
  189                 case T_IN_VAIN:                 /* Shouldn't ever see this, lowmem_vectors eats it */
  190                 case T_SYSTEM_MANAGEMENT:
  191                 case T_ALTIVEC_ASSIST:
  192                 case T_INTERRUPT:
  193                 case T_FP_UNAVAILABLE:
  194                 case T_IO_ERROR:
  195                 case T_RESERVED:
  196                 default:
  197                         unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
  198                         break;
  199 
  200 
  201 /*
  202  *                      Here we handle a machine check in the kernel
  203  */
  204 
  205                 case T_MACHINE_CHECK:
  206                         handleMck(ssp);                                         /* Common to both user and kernel */
  207                         break;
  208 
  209 
  210                 case T_ALIGNMENT:
   211 /*
   212  *                      If enaNotifyEMb is set, we get here, and
   213  *                      we have actually already emulated the unaligned access.
   214  *                      All that we want to do here is to ignore the interrupt.  This is to allow logging or
   215  *                      tracing of unaligned accesses.
   216  */
  217                         
  218                         if(ssp->save_hdr.save_misc3) {                          /* Was it a handled exception? */
  219                                 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);  /* Go panic */
  220                                 break;
  221                         }
  222                         KERNEL_DEBUG_CONSTANT(
  223                                 MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
  224                                 (int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);
  225                         break;
  226 
  227                 case T_EMULATE:
   228 /*
   229  *                      If enaNotifyEMb is set, we get here, and
   230  *                      we have actually already emulated the instruction.
   231  *                      All that we want to do here is to ignore the interrupt.  This is to allow logging or
   232  *                      tracing of emulated instructions.
   233  */
  234 
  235                         KERNEL_DEBUG_CONSTANT(
  236                                 MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
  237                                 (int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
  238                         break;
  239 
  240 
  241 
  242 
  243                         
  244                 case T_TRACE:
  245                 case T_RUNMODE_TRACE:
  246                 case T_INSTRUCTION_BKPT:
  247                         if (!Call_Debugger(trapno, ssp))
  248                                 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
  249                         break;
  250 
  251                 case T_PROGRAM:
  252                         if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
  253                                 if (!Call_Debugger(trapno, ssp))
  254                                         unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
  255                         } else {
  256                                 unresolved_kernel_trap(trapno, ssp, 
  257                                                         dsisr, dar, NULL);
  258                         }
  259                         break;
  260 
  261                 case T_DATA_ACCESS:
  262 #if     MACH_KDB
  263                         mp_disable_preemption();
  264                         if (debug_mode
  265                             && getPerProc()->debugger_active
  266                             && !let_ddb_vm_fault) {
  267                                 /*
  268                                  * Force kdb to handle this one.
  269                                  */
  270                                 kdb_trap(trapno, ssp);
  271                         }
  272                         mp_enable_preemption();
  273 #endif  /* MACH_KDB */
  274                         /* can we take this during normal panic dump operation? */
  275                         if (debug_mode
  276                             && getPerProc()->debugger_active
  277                             && !not_in_kdp) {
  278                                 /* 
  279                                  * Access fault while in kernel core dump.
  280                                  */
  281                                 kdp_dump_trap(trapno, ssp); 
  282                         }
  283 
  284 
   285                         if(ssp->save_dsisr & dsiInvMode) {                      /* Did someone try to set a reservation on cache-inhibited memory? */
  286                                 panic("trap: disallowed access to cache inhibited memory - %016llX\n", dar);
  287                         }
  288 
  289                         if(intr) ml_set_interrupts_enabled(TRUE);       /* Enable if we were */
  290                         
  291                         if(((dar >> 28) < 0xE) | ((dar >> 28) > 0xF))  {        /* User memory window access? */
  292                         
  293                                 offset = (vm_map_offset_t)dar;                          /* Set the failing address */
  294                                 map = kernel_map;                                               /* No, this is a normal kernel access */
  295                                 
  296 /*
  297  *      Note: Some ROM device drivers will access page 0 when they start.  The IOKit will 
  298  *      set a flag to tell us to ignore any access fault on page 0.  After the driver is
  299  *      opened, it will clear the flag.
  300  */
  301                                 if((0 == (offset & -PAGE_SIZE)) &&              /* Check for access of page 0 and */
  302                                   ((thread->machine.specFlags) & ignoreZeroFault)) {    /* special case of ignoring page zero faults */
  303                                         ssp->save_srr0 += 4;                            /* Point to next instruction */
  304                                         break;
  305                                 }
  306 
  307                                 code = vm_fault(map, vm_map_trunc_page(offset),
  308                                                 dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
  309                                                 FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));
  310 
  311                                 if (code != KERN_SUCCESS) {
  312                                         unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
  313                                 } else { 
  314                                         ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to re-try fault */
  315                                         ssp->save_dsisr = (ssp->save_dsisr & 
  316                                                 ~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);   /* Make sure this is marked as a miss */
  317                                 }
  318                                 break;
  319                         }
  320 
  321                         /* If we get here, the fault was due to a user memory window access */
  322 
  323                         map = thread->map;
  324                         
  325                         offset = (vm_map_offset_t)(thread->machine.umwRelo + dar);      /* Compute the user space address */
  326 
  327                         code = vm_fault(map, vm_map_trunc_page(offset),
  328                                         dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
  329                                         FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));
  330 
  331                         /* If we failed, there should be a recovery
  332                          * spot to rfi to.
  333                          */
  334                         if (code != KERN_SUCCESS) {
  335                                 if (thread->recover) {
  336                                         ssp->save_srr0 = thread->recover;
  337                                         thread->recover = (vm_offset_t)NULL;
  338                                 } else {
  339                                         unresolved_kernel_trap(trapno, ssp, dsisr, dar, "copyin/out has no recovery point");
  340                                 }
  341                         }
  342                         else { 
  343                                 ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to re-try fault */
  344                                 ssp->save_dsisr = (ssp->save_dsisr & 
  345                                         ~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);   /* Make sure this is marked as a miss */
  346                         }
  347                         
  348                         break;
  349                         
  350                 case T_INSTRUCTION_ACCESS:
  351 
  352 #if     MACH_KDB
  353                         if (debug_mode
  354                             && getPerProc()->debugger_active
  355                             && !let_ddb_vm_fault) {
  356                                 /*
  357                                  * Force kdb to handle this one.
  358                                  */
  359                                 kdb_trap(trapno, ssp);
  360                         }
  361 #endif  /* MACH_KDB */
  362 
  363                         /* Same as for data access, except fault type
  364                          * is PROT_EXEC and addr comes from srr0
  365                          */
  366 
  367                         if(intr) ml_set_interrupts_enabled(TRUE);       /* Enable if we were */
  368 
  369                         map = kernel_map;
  370                         
  371                         code = vm_fault(map, vm_map_trunc_page(ssp->save_srr0),
  372                                         PROT_EXEC, FALSE, THREAD_UNINT, NULL, vm_map_trunc_page(0));
  373 
  374                         if (code != KERN_SUCCESS) {
  375                                 unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
  376                         } else { 
  377                                 ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to re-try fault */
  378                                 ssp->save_srr1 = (ssp->save_srr1 & 
  379                                         ~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);               /* Make sure this is marked as a miss */
  380                         }
  381                         break;
  382 
   383                 /* Usually shandler handles all the system calls, but the
   384                  * atomic thread switcher may throw up (via thandler) and
   385                  * have to pass it up to the exception handler.
   386                  */
  387 
  388                 case T_SYSTEM_CALL:
  389                         unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
  390                         break;
  391 
  392                 case T_AST:
  393                         unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
  394                         break;
  395                 }
  396         } else {
  397 
  398                 /* 
  399                  * Processing for user state traps with interrupt enabled
  400                  * For T_AST, interrupts are enabled in the AST delivery
  401                  */
  402                 if (trapno != T_AST) 
  403                         ml_set_interrupts_enabled(TRUE);
  404 
  405 #ifdef MACH_BSD
  406                 {
  407                         get_procrustime(&tv);
  408                 }
  409 #endif /* MACH_BSD */
  410 
  411         
  412                 /*
  413                  * Trap came from user task
  414                  */
  415 
  416                 switch (trapno) {
  417 
  418                 case T_PREEMPT:
  419                         unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL);
  420                         break;  
  421 
  422                 case T_PERF_MON:
  423                         perfmon_handle_pmi(ssp);
  424                         break;
  425 
   426                         /*
   427                          * These trap types should never be seen by trap().
   428                          * Some are interrupts that should be seen by
   429                          * interrupt(); others just don't happen because they
   430                          * are handled elsewhere.
   431                          */
  432                 case T_DECREMENTER:
  433                 case T_IN_VAIN:                                                         /* Shouldn't ever see this, lowmem_vectors eats it */
  434                 case T_INTERRUPT:
  435                 case T_FP_UNAVAILABLE:
  436                 case T_SYSTEM_MANAGEMENT:
  437                 case T_RESERVED:
  438                 case T_IO_ERROR:
  439                         
  440                 default:
  441 
  442                         ml_set_interrupts_enabled(FALSE);                                       /* Turn off interruptions */
  443 
  444                         panic("Unexpected user state trap(cpu %d): 0x%08X DSISR=0x%08X DAR=0x%016llX PC=0x%016llX, MSR=0x%016llX\n",
  445                                cpu_number(), trapno, dsisr, dar, ssp->save_srr0, ssp->save_srr1);
  446                         break;
  447 
  448 
  449 /*
  450  *                      Here we handle a machine check in user state
  451  */
  452 
  453                 case T_MACHINE_CHECK:
  454                         handleMck(ssp);                                         /* Common to both user and kernel */
  455                         break;
  456 
  457                 case T_RESET:
  458                         ml_set_interrupts_enabled(FALSE);                                       /* Turn off interruptions */
  459                         if (!Call_Debugger(trapno, ssp))
  460                                 panic("Unexpected Reset exception: srr0 = %016llx, srr1 = %016llx\n",
  461                                         ssp->save_srr0, ssp->save_srr1);
  462                         break;                                          /* We just ignore these */
  463 
  464                 case T_ALIGNMENT:
   465 /*
   466  *                      If enaNotifyEMb is set, we get here, and
   467  *                      we have actually already emulated the unaligned access.
   468  *                      All that we want to do here is to ignore the interrupt.  This is to allow logging or
   469  *                      tracing of unaligned accesses.
   470  */
  471                         
  472                         KERNEL_DEBUG_CONSTANT(
  473                                 MACHDBG_CODE(DBG_MACH_EXCP_ALNG, 0) | DBG_FUNC_NONE,
  474                                 (int)ssp->save_srr0 - 4, (int)dar, (int)dsisr, (int)ssp->save_lr, 0);
  475                         
  476                         if(ssp->save_hdr.save_misc3) {                          /* Was it a handled exception? */
  477                                 exception = EXC_BAD_ACCESS;                             /* Yes, throw exception */
  478                                 code = EXC_PPC_UNALIGNED;
  479                                 subcode = (unsigned int)dar;
  480                         }
  481                         break;
  482 
  483                 case T_EMULATE:
   484 /*
   485  *                      If enaNotifyEMb is set, we get here, and
   486  *                      we have actually already emulated the instruction.
   487  *                      All that we want to do here is to ignore the interrupt.  This is to allow logging or
   488  *                      tracing of emulated instructions.
   489  */
  490 
  491                         KERNEL_DEBUG_CONSTANT(
  492                                 MACHDBG_CODE(DBG_MACH_EXCP_EMUL, 0) | DBG_FUNC_NONE,
  493                                 (int)ssp->save_srr0 - 4, (int)((savearea_comm *)ssp)->save_misc2, (int)dsisr, (int)ssp->save_lr, 0);
  494                         break;
  495 
  496                 case T_TRACE:                   /* Real PPC chips */
  497                   if (be_tracing()) {
  498                     add_pcbuffer();
  499                     return ssp;
  500                   }
  501                   /* fall through */
  502 
  503                 case T_INSTRUCTION_BKPT:
  504                         exception = EXC_BREAKPOINT;
  505                         code = EXC_PPC_TRACE;
  506                         subcode = (unsigned int)ssp->save_srr0;
  507                         break;
  508 
  509                 case T_PROGRAM:
  510                         if (ssp->save_srr1 & MASK(SRR1_PRG_FE)) {
  511                                 fpu_save(thread->machine.curctx);
  512                                 UPDATE_PPC_EXCEPTION_STATE;
  513                                 exception = EXC_ARITHMETIC;
  514                                 code = EXC_ARITHMETIC;
  515                         
  516                                 mp_disable_preemption();
  517                                 subcode = ssp->save_fpscr;
  518                                 mp_enable_preemption();
  519                         }       
  520                         else if (ssp->save_srr1 & MASK(SRR1_PRG_ILL_INS)) {
  521                                 
  522                                 UPDATE_PPC_EXCEPTION_STATE
  523                                 exception = EXC_BAD_INSTRUCTION;
  524                                 code = EXC_PPC_UNIPL_INST;
  525                                 subcode = (unsigned int)ssp->save_srr0;
  526                         } else if ((unsigned int)ssp->save_srr1 & MASK(SRR1_PRG_PRV_INS)) {
  527 
  528                                 UPDATE_PPC_EXCEPTION_STATE;
  529                                 exception = EXC_BAD_INSTRUCTION;
  530                                 code = EXC_PPC_PRIVINST;
  531                                 subcode = (unsigned int)ssp->save_srr0;
  532                         } else if (ssp->save_srr1 & MASK(SRR1_PRG_TRAP)) {
  533                                 unsigned int inst;
  534                                 //char *iaddr;
  535                                 
  536                                 //iaddr = CAST_DOWN(char *, ssp->save_srr0);            /* Trim from long long and make a char pointer */ 
  537                                 if (copyin(ssp->save_srr0, (char *) &inst, 4 )) panic("copyin failed\n");
  538                                 
  539                                 if(dgWork.dgFlags & enaDiagTrap) {      /* Is the diagnostic trap enabled? */
  540                                         if((inst & 0xFFFFFFF0) == 0x0FFFFFF0) { /* Is this a TWI 31,R31,0xFFFx? */
  541                                                 if(diagTrap(ssp, inst & 0xF)) { /* Call the trap code */
  542                                                         ssp->save_srr0 += 4ULL; /* If we eat the trap, bump pc */
  543                                                         exception = 0;                  /* Clear exception */
  544                                                         break;                                  /* All done here */
  545                                                 }
  546                                         }
  547                                 }
  548                                 
  549                                 UPDATE_PPC_EXCEPTION_STATE;
  550                                 
  551                                 if (inst == 0x7FE00008) {
  552                                         exception = EXC_BREAKPOINT;
  553                                         code = EXC_PPC_BREAKPOINT;
  554                                 } else {
  555                                         exception = EXC_SOFTWARE;
  556                                         code = EXC_PPC_TRAP;
  557                                 }
  558                                 subcode = (unsigned int)ssp->save_srr0;
  559                         }
  560                         break;
  561                         
  562                 case T_ALTIVEC_ASSIST:
  563                         UPDATE_PPC_EXCEPTION_STATE;
  564                         exception = EXC_ARITHMETIC;
  565                         code = EXC_PPC_ALTIVECASSIST;
  566                         subcode = (unsigned int)ssp->save_srr0;
  567                         break;
  568 
  569                 case T_DATA_ACCESS:
  570                         map = thread->map;
  571 
   572                         if(ssp->save_dsisr & dsiInvMode) {                      /* Did someone try to set a reservation on cache-inhibited memory? */
  573                                 UPDATE_PPC_EXCEPTION_STATE;                             /* Don't even bother VM with this one */
  574                                 exception = EXC_BAD_ACCESS;
  575                                 subcode = (unsigned int)dar;
  576                                 break;
  577                         }
  578                         
  579                         code = vm_fault(map, vm_map_trunc_page(dar),
  580                                  dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO,
  581                                  FALSE, THREAD_ABORTSAFE, NULL, vm_map_trunc_page(0));
  582 
  583                         if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
  584                                 UPDATE_PPC_EXCEPTION_STATE;
  585                                 exception = EXC_BAD_ACCESS;
  586                                 subcode = (unsigned int)dar;
  587                         } else { 
  588                                 ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to re-try fault */
  589                                 ssp->save_dsisr = (ssp->save_dsisr & 
  590                                         ~((MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);   /* Make sure this is marked as a miss */
  591                         }
  592                         break;
  593                         
  594                 case T_INSTRUCTION_ACCESS:
  595                         /* Same as for data access, except fault type
  596                          * is PROT_EXEC and addr comes from srr0
  597                          */
  598                         map = thread->map;
  599                         
  600                         code = vm_fault(map, vm_map_trunc_page(ssp->save_srr0),
  601                                 PROT_EXEC, FALSE, THREAD_ABORTSAFE, NULL, vm_map_trunc_page(0));
  602 
  603                         if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) {
  604                                 UPDATE_PPC_EXCEPTION_STATE;
  605                                 exception = EXC_BAD_ACCESS;
  606                                 subcode = (unsigned int)ssp->save_srr0;
  607                         } else { 
  608                                 ssp->save_hdr.save_flags |= SAVredrive; /* Tell low-level to re-try fault */
  609                                 ssp->save_srr1 = (ssp->save_srr1 & 
  610                                         ~((unsigned long long)(MASK(DSISR_NOEX) | MASK(DSISR_PROT)))) | MASK(DSISR_HASH);               /* Make sure this is marked as a miss */
  611                         }
  612                         break;
  613 
  614                 case T_AST:
  615                         /* AST delivery is done below */
  616                         break;
  617                         
  618                 }
  619 #ifdef MACH_BSD
  620                 {
  621                 bsd_uprofil(&tv, ssp->save_srr0);
  622                 }
  623 #endif /* MACH_BSD */
  624         }
  625 
  626         if (exception) {
  627                 /* if this is the init task, save the exception information */
  628                 /* this probably is a fatal exception */
  629 #if 0
  630                 if(bsd_init_task == current_task()) {
  631                         char *buf;
  632                         int i;
  633 
  634                         buf = init_task_failure_data;
  635 
  636 
  637                         buf += sprintf(buf, "Exception Code = 0x%x, Subcode = 0x%x\n", code, subcode);
  638                         buf += sprintf(buf, "DSISR = 0x%08x, DAR = 0x%016llx\n"
  639                                                                 , dsisr, dar);
  640 
  641                         for (i=0; i<32; i++) {
  642                                 if ((i % 8) == 0) {
  643                                         buf += sprintf(buf, "\n%4d :",i);
  644                                 }
  645                                 buf += sprintf(buf, " %08x",*(&ssp->save_r0+i));
  646                         }
  647 
  648                         buf += sprintf(buf, "\n\n");
  649                         buf += sprintf(buf, "cr        = 0x%08X\t\t",ssp->save_cr);
  650                         buf += sprintf(buf, "xer       = 0x%08X\n",ssp->save_xer);
  651                         buf += sprintf(buf, "lr        = 0x%016llX\t\t",ssp->save_lr);
  652                         buf += sprintf(buf, "ctr       = 0x%016llX\n",ssp->save_ctr); 
  653                         buf += sprintf(buf, "srr0(iar) = 0x%016llX\t\t",ssp->save_srr0);
  654                         buf += sprintf(buf, "srr1(msr) = 0x%016llX\n",ssp->save_srr1,
  655                            "\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18"
  656                            "FE1\x19""AL\x1a""EP\x1bIT\x1c""DT");
  657                         buf += sprintf(buf, "\n\n");
  658 
  659                         /* generate some stack trace */
  660                         buf += sprintf(buf, "Application level back trace:\n");
  661                         if (ssp->save_srr1 & MASK(MSR_PR)) { 
  662                            char *addr = (char*)ssp->save_r1;
  663                            unsigned int stack_buf[3];
  664                            for (i = 0; i < 8; i++) {
  665                                 if (addr == (char*)NULL)
  666                                         break;
  667                                 if (!copyin(ssp->save_r1,(char*)stack_buf, 
  668                                                         3 * sizeof(int))) {
  669                                         buf += sprintf(buf, "0x%08X : 0x%08X\n"
  670                                                 ,addr,stack_buf[2]);
  671                                         addr = (char*)stack_buf[0];
  672                                 } else {
  673                                         break;
  674                                 }
  675                            }
  676                         }
  677                         buf[0] = '\0';
  678                 }
  679 #endif
  680                 doexception(exception, code, subcode);
  681         }
  682         /* AST delivery
  683          * Check to see if we need an AST, if so take care of it here
  684          */
  685         ml_set_interrupts_enabled(FALSE);
  686 
  687         if (USER_MODE(ssp->save_srr1)) {
  688                 myast = ast_pending();
  689                 while (*myast & AST_ALL) {
  690                         ast_taken(AST_ALL, intr);
  691                         ml_set_interrupts_enabled(FALSE);
  692                         myast = ast_pending();
  693                 }
  694         }
  695 
  696         return ssp;
  697 }
  698 
  699 /* This routine is called from assembly before each and every system call.
  700  * It must preserve r3.
  701  */
  702 
  703 extern int syscall_trace(int, struct savearea *);
  704 
  705 
  706 extern int pmdebug;
  707 
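       /*
        * syscall_trace() looks up the Mach trap's argument count in
        * mach_trap_table (Mach trap numbers arrive negated in r0), copies up to
        * three arguments from r3..r5, and emits a DBG_FUNC_START kdebug trace.
        * syscall_trace_end() below emits the matching DBG_FUNC_END record.
        */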
  708 int syscall_trace(int retval, struct savearea *ssp)
  709 {
  710         int i, argc;
  711         int kdarg[3];
  712 /* Always prepare to trace mach system calls */
  713 
  714         kdarg[0]=0;
  715         kdarg[1]=0;
  716         kdarg[2]=0;
  717         
  718         argc = mach_trap_table[-((unsigned int)ssp->save_r0)].mach_trap_arg_count;
  719         
  720         if (argc > 3)
  721                 argc = 3;
  722         
  723         for (i=0; i < argc; i++)
  724                 kdarg[i] = (int)*(&ssp->save_r3 + i);
  725         
  726         KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-(ssp->save_r0))) | DBG_FUNC_START,
  727                 kdarg[0], kdarg[1], kdarg[2], 0, 0);
  728 
  729         return retval;
  730 }
  731 
  732 /* This routine is called from assembly after each mach system call
  733  * It must preserve r3.
  734  */
  735 
  736 extern int syscall_trace_end(int, struct savearea *);
  737 
  738 int syscall_trace_end(int retval, struct savearea *ssp)
  739 {
  740         KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-((unsigned int)ssp->save_r0))) | DBG_FUNC_END,
  741                 retval, 0, 0, 0, 0);
  742         return retval;
  743 }
  744 
  745 /*
  746  * called from syscall if there is an error
  747  */
  748 
  749 int syscall_error(
  750         int exception,
  751         int code,
  752         int subcode,
  753         struct savearea *ssp)
  754 {
  755         register thread_t thread;
  756 
  757         thread = current_thread();
  758 
  759         if (thread == 0)
  760             panic("syscall error in boot phase");
  761 
  762         if (!USER_MODE(ssp->save_srr1))
  763                 panic("system call called from kernel");
  764 
  765         doexception(exception, code, subcode);
  766 
  767         return 0;
  768 }
  769 
  770 /* Pass up a server syscall/exception */
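       /* The exception code and subcode are packed into an exception_data array
        * and handed to exception_triage() for delivery.
        */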
  771 void
  772 doexception(
  773             int exc,
  774             int code,
  775             int sub)
  776 {
  777         exception_data_type_t   codes[EXCEPTION_CODE_MAX];
  778 
  779         codes[0] = code;        
  780         codes[1] = sub;
  781         exception_triage(exc, codes, 2);
  782 }
  783 
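       /*
        * Printable names for each exception vector, indexed by
        * trapno / T_VECTOR_SIZE; used by unresolved_kernel_trap() below.
        */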
  784 char *trap_type[] = {
  785         "Unknown",
  786         "0x100 - System reset",
  787         "0x200 - Machine check",
  788         "0x300 - Data access",
  789         "0x400 - Inst access",
  790         "0x500 - Ext int",
  791         "0x600 - Alignment",
  792         "0x700 - Program",
  793         "0x800 - Floating point",
  794         "0x900 - Decrementer",
  795         "0xA00 - n/a",
  796         "0xB00 - n/a",
  797         "0xC00 - System call",
  798         "0xD00 - Trace",
  799         "0xE00 - FP assist",
  800         "0xF00 - Perf mon",
  801         "0xF20 - VMX",
  802         "INVALID EXCEPTION",
  803         "INVALID EXCEPTION",
  804         "INVALID EXCEPTION",
  805         "0x1300 - Inst bkpnt",
  806         "0x1400 - Sys mgmt",
  807         "0x1600 - Altivec Assist",
  808         "0x1700 - Thermal",
  809         "INVALID EXCEPTION",
  810         "INVALID EXCEPTION",
  811         "INVALID EXCEPTION",
  812         "INVALID EXCEPTION",
  813         "INVALID EXCEPTION",
  814         "INVALID EXCEPTION",
  815         "INVALID EXCEPTION",
  816         "INVALID EXCEPTION",
  817         "Emulate",
  818         "0x2000 - Run Mode/Trace",
  819         "Signal Processor",
  820         "Preemption",
  821         "Context Switch",
  822         "Shutdown",
  823         "System Failure"
  824 };
  825 int TRAP_TYPES = sizeof (trap_type) / sizeof (trap_type[0]);
  826 
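       /*
        * unresolved_kernel_trap() is the terminal path for kernel-mode traps that
        * cannot be handled: it disables interrupts and low-level tracing, prints
        * the trap name and a backtrace, draws the panic dialog, optionally drops
        * into the debugger when panicDebugging is set, and then panics.
        */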
  827 void unresolved_kernel_trap(int trapno,
  828                             struct savearea *ssp,
  829                             unsigned int dsisr,
  830                             addr64_t dar,
  831                             const char *message)
  832 {
  833         char *trap_name;
  834         extern void print_backtrace(struct savearea *);
  835         extern unsigned int debug_mode, disableDebugOuput;
  836         extern unsigned long panic_caller;
  837 
  838         ml_set_interrupts_enabled(FALSE);                                       /* Turn off interruptions */
  839         lastTrace = LLTraceSet(0);                                                      /* Disable low-level tracing */
  840 
  841         if( logPanicDataToScreen )
  842                 disableDebugOuput = FALSE;
  843 
  844         debug_mode++;
  845         if ((unsigned)trapno <= T_MAX)
  846                 trap_name = trap_type[trapno / T_VECTOR_SIZE];
  847         else
  848                 trap_name = "???? unrecognized exception";
  849         if (message == NULL)
  850                 message = trap_name;
  851 
  852         kdb_printf("\n\nUnresolved kernel trap(cpu %d): %s DAR=0x%016llX PC=0x%016llX\n",
  853                cpu_number(), trap_name, dar, ssp->save_srr0);
  854 
  855         print_backtrace(ssp);
  856 
  857         panic_caller = (0xFFFF0000 | (trapno / T_VECTOR_SIZE) );
  858         draw_panic_dialog();
  859                 
  860         if( panicDebugging )
  861                 (void *)Call_Debugger(trapno, ssp);
  862         panic(message);
  863 }
  864 
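       /*
        * Machine checks from both user and kernel mode funnel into handleMck(),
        * which logs the saved SRR0/SRR1, DSISR/DAR, and FIR diagnostic registers,
        * returns if the check was recovered (save_misc3 set), and otherwise
        * panics as an uncorrectable machine check.
        */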
  865 const char *corr[2] = {"uncorrected", "corrected  "};
  866 
  867 void handleMck(struct savearea *ssp) {                                  /* Common machine check handler */
  868 
  869         int cpu;
  870         
  871         cpu = cpu_number();
  872 
  873         printf("Machine check (%d) - %s - pc = %016llX, msr = %016llX, dsisr = %08X, dar = %016llX\n",
  874                 cpu, corr[ssp->save_hdr.save_misc3], ssp->save_srr0, ssp->save_srr1, ssp->save_dsisr, ssp->save_dar);           /* Tell us about it */
  875         printf("Machine check (%d) -   AsyncSrc = %016llX, CoreFIR = %016llx\n", cpu, ssp->save_xdat0, ssp->save_xdat1);
  876         printf("Machine check (%d) -      L2FIR = %016llX,  BusFir = %016llx\n", cpu, ssp->save_xdat2, ssp->save_xdat3);
  877         
   878         if(ssp->save_hdr.save_misc3) return;                            /* Leave if the machine check was recovered */
  879 
  880         panic("Uncorrectable machine check: pc = %016llX, msr = %016llX, dsisr = %08X, dar = %016llX\n"
  881               "  AsyncSrc = %016llX, CoreFIR = %016llx\n"
  882               "     L2FIR = %016llX,  BusFir = %016llx\n",
  883                   ssp->save_srr0, ssp->save_srr1, ssp->save_dsisr, ssp->save_dar, 
  884                   ssp->save_xdat0, ssp->save_xdat1, ssp->save_xdat2, ssp->save_xdat3);
  885         
  886         return;
  887 }
  888 
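       /*
        * thread_syscall_return() finishes a Mach trap from kernel context: it
        * records a kdebug end trace for Mach traps (negative r0), stores the
        * return code in the user savearea's r3, and resumes user state via
        * thread_exception_return().
        */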
  889 void
  890 thread_syscall_return(
  891         kern_return_t ret)
  892 {
  893         register thread_t   thread = current_thread();
  894         register struct savearea *regs = USER_REGS(thread);
  895 
  896         if (kdebug_enable && ((unsigned int)regs->save_r0 & 0x80000000)) {
  897           /* Mach trap */
  898           KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-(regs->save_r0))) | DBG_FUNC_END,
  899                        ret, 0, 0, 0, 0);
  900         }           
  901         regs->save_r3 = ret;
  902 
  903         thread_exception_return();
  904         /*NOTREACHED*/
  905 }
  906 
  907 
  908 #if     MACH_KDB
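       /*
        * Under MACH_KDB, thread_kdb_return() re-invokes the debugger on the
        * thread's saved exception and then resumes user state via
        * thread_exception_return().
        */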
  909 void
  910 thread_kdb_return(void)
  911 {
  912         register thread_t       thread = current_thread();
  913         register struct savearea *regs = USER_REGS(thread);
  914 
  915         Call_Debugger(thread->machine.pcb->save_exception, regs);
  916         thread_exception_return();
  917         /*NOTREACHED*/
  918 }
  919 #endif  /* MACH_KDB */

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.