FreeBSD/Linux Kernel Cross Reference
sys/bsd/kern/kdebug.c


    1 /*
    2  * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
    3  *
    4  * @APPLE_LICENSE_HEADER_START@
    5  * 
    6  * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
    7  * 
    8  * This file contains Original Code and/or Modifications of Original Code
    9  * as defined in and that are subject to the Apple Public Source License
   10  * Version 2.0 (the 'License'). You may not use this file except in
   11  * compliance with the License. Please obtain a copy of the License at
   12  * http://www.opensource.apple.com/apsl/ and read it before using this
   13  * file.
   14  * 
   15  * The Original Code and all software distributed under the License are
   16  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   17  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   18  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   19  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
   20  * Please see the License for the specific language governing rights and
   21  * limitations under the License.
   22  * 
   23  * @APPLE_LICENSE_HEADER_END@
   24  */
   25 
   26 #include <machine/spl.h>
   27 
   28 #define HZ      100
   29 #include <mach/clock_types.h>
   30 #include <mach/mach_types.h>
   31 #include <mach/mach_time.h>
   32 #include <machine/machine_routines.h>
   33 
   34 #include <sys/kdebug.h>
   35 #include <sys/errno.h>
   36 #include <sys/param.h>
   37 #include <sys/proc.h>
   38 #include <sys/vm.h>
   39 #include <sys/sysctl.h>
   40 
   41 #include <kern/thread.h>
   42 #include <kern/task.h>
   43 #include <vm/vm_kern.h>
   44 #include <sys/lock.h>
   45 
   46 /* trace enable status */
   47 unsigned int kdebug_enable = 0;
   48 
   49 /* track timestamps for security server's entropy needs */
   50 uint64_t *                kd_entropy_buffer = 0;
   51 unsigned int      kd_entropy_bufsize = 0;
   52 unsigned int      kd_entropy_count  = 0;
   53 unsigned int      kd_entropy_indx   = 0;
   54 unsigned int      kd_entropy_buftomem = 0;
   55 
   56 /* kd_buf kd_buffer[kd_bufsize/sizeof(kd_buf)]; */
   57 kd_buf * kd_bufptr;
   58 unsigned int kd_buftomem=0;
   59 kd_buf * kd_buffer=0;
   60 kd_buf * kd_buflast;
   61 kd_buf * kd_readlast;
   62 unsigned int nkdbufs = 8192;
   63 unsigned int kd_bufsize = 0;
   64 unsigned int kdebug_flags = 0;
   65 unsigned int kdebug_nolog=1;
   66 unsigned int kdlog_beg=0;
   67 unsigned int kdlog_end=0;
   68 unsigned int kdlog_value1=0;
   69 unsigned int kdlog_value2=0;
   70 unsigned int kdlog_value3=0;
   71 unsigned int kdlog_value4=0;
   72 
   73 unsigned long long kd_prev_timebase = 0LL;
   74 decl_simple_lock_data(,kd_trace_lock);
   75 
   76 kd_threadmap *kd_mapptr = 0;
   77 unsigned int kd_mapsize = 0;
   78 unsigned int kd_mapcount = 0;
   79 unsigned int kd_maptomem = 0;
   80 
   81 pid_t global_state_pid = -1;       /* Used to control exclusive use of kd_buffer */
   82 
   83 #define DBG_FUNC_MASK 0xfffffffc
   84 
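       /*
        * For reference: a kdebug debugid packs a class in the top 8 bits, a
        * subclass in the next 8, a 14-bit code, and a 2-bit function
        * qualifier (DBG_FUNC_START, DBG_FUNC_END or DBG_FUNC_NONE).
        * DBG_FUNC_MASK strips the qualifier so that matching start and end
        * events compare equal, and "debugid >> 24" recovers the class, which
        * the range/value filters below rely on.  A sketch of how a debugid is
        * typically formed, assuming the KDBG_CODE() packing from
        * <sys/kdebug.h>:
        *
        *     debugid = KDBG_CODE(DBG_MACH, DBG_MACH_SCHED, 0) | DBG_FUNC_START;
        */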
   85 #ifdef ppc
   86 extern natural_t rtclock_decrementer_min;
   87 #endif /* ppc */
   88 
   89 struct kdebug_args {
   90   int code;
   91   int arg1;
   92   int arg2;
   93   int arg3;
   94   int arg4;
   95   int arg5;
   96 };
   97 
   98 /* task to string structure */
   99 struct tts
  100 {
   101   task_t   *task;            /* from the proc's task */
   102   pid_t     pid;             /* from the proc's p_pid  */
   103   char      task_comm[20];   /* from the proc's p_comm */
  104 };
  105 
  106 typedef struct tts tts_t;
  107 
  108 struct krt
  109 {
  110   kd_threadmap *map;    /* pointer to the map buffer */
  111   int count;
  112   int maxcount;
  113   struct tts *atts;
  114 };
  115 
  116 typedef struct krt krt_t;
  117 
  118 /* This is for the CHUD toolkit call */
  119 typedef void (*kd_chudhook_fn) (unsigned int debugid, unsigned int arg1,
  120                                 unsigned int arg2, unsigned int arg3,
  121                                 unsigned int arg4, unsigned int arg5);
  122 
  123 kd_chudhook_fn kdebug_chudhook = 0;   /* pointer to CHUD toolkit function */
  124 
  125 /* Support syscall SYS_kdebug_trace */
   126 int kdebug_trace(p, uap, retval)
  127      struct proc *p;
  128      struct kdebug_args *uap;
  129      register_t *retval;
  130 {
  131   if (kdebug_nolog)
  132     return(EINVAL);
  133   
  134   kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0);
  135   return(0);
  136 }
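       /*
        * Illustrative only: from user space this handler would typically be
        * reached through the raw syscall interface, roughly as follows
        * (assuming SYS_kdebug_trace is exposed by <sys/syscall.h> on the
        * target system, and that "dbgcode" is an application-chosen debugid):
        *
        *     syscall(SYS_kdebug_trace, dbgcode | DBG_FUNC_NONE,
        *             arg1, arg2, arg3, arg4);
        *
        * Note that arg5 is always forced to 0 for user-generated events, and
        * that the call fails with EINVAL while tracing is disabled.
        */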
  137 
  138 
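       /*
        * kernel_debug() records one trace entry from kernel context: it runs
        * the CHUD hook and entropy sampling when they are enabled, applies
        * the pid and debugid filters, and appends the entry to the circular
        * kd_buffer under kd_trace_lock with interrupts disabled.  arg5 is
        * overwritten with the current activation (tagged with KDBG_CPU_MASK
        * when not on cpu 0); kernel_debug1() below is identical except that
        * it stores the caller-supplied arg5 verbatim and skips the entropy
        * sampling.
        */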
  139 void
  140 kernel_debug(debugid, arg1, arg2, arg3, arg4, arg5)
  141 unsigned int debugid, arg1, arg2, arg3, arg4, arg5;
  142 {
  143         kd_buf * kd;
  144         struct proc *curproc;
  145         int      s;
  146         unsigned long long now;
  147         mach_timespec_t *tsp;
  148 
  149         if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
  150               if (kdebug_chudhook)
  151                     kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
  152 
  153               if (!((kdebug_enable & KDEBUG_ENABLE_ENTROPY) ||
  154                     (kdebug_enable & KDEBUG_ENABLE_TRACE)))
  155                 return;
  156         }
  157 
  158         s = ml_set_interrupts_enabled(FALSE);
  159 
  160         if (kdebug_enable & KDEBUG_ENABLE_ENTROPY)
  161           {
  162             if (kd_entropy_indx < kd_entropy_count)
  163               {
  164                 kd_entropy_buffer [ kd_entropy_indx] = mach_absolute_time();
  165                 kd_entropy_indx++;
  166               }
  167             
  168             if (kd_entropy_indx == kd_entropy_count)
  169               {
  170                 /* Disable entropy collection */
  171                 kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
  172               }
  173           }
  174 
  175         if (kdebug_nolog)
  176           {
  177             ml_set_interrupts_enabled(s);
  178             return;
  179           }
  180 
  181         usimple_lock(&kd_trace_lock);
  182         if (kdebug_flags & KDBG_PIDCHECK)
  183           {
  184             /* If kdebug flag is not set for current proc, return  */
  185             curproc = current_proc();
  186             if ((curproc && !(curproc->p_flag & P_KDEBUG)) &&
  187                 ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
  188               {
  189                 usimple_unlock(&kd_trace_lock);
  190                 ml_set_interrupts_enabled(s);
  191                 return;
  192               }
  193           }
  194         else if (kdebug_flags & KDBG_PIDEXCLUDE)
  195           {
  196             /* If kdebug flag is set for current proc, return  */
  197             curproc = current_proc();
  198             if ((curproc && (curproc->p_flag & P_KDEBUG)) &&
  199                 ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
  200               {
  201                 usimple_unlock(&kd_trace_lock);
  202                 ml_set_interrupts_enabled(s);
  203                 return;
  204               }
  205           }
  206 
  207         if (kdebug_flags & KDBG_RANGECHECK)
  208           {
   209             if (((debugid < kdlog_beg) || (debugid > kdlog_end))
   210                 && ((debugid >> 24) != DBG_TRACE))
  211               {
  212                 usimple_unlock(&kd_trace_lock);
  213                 ml_set_interrupts_enabled(s);
  214                 return;
  215               }
  216           }
  217         else if (kdebug_flags & KDBG_VALCHECK)
  218           {
  219             if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
  220                 (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
  221                 (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
  222                 (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
  223                 (debugid >> 24 != DBG_TRACE))
  224               {
  225                 usimple_unlock(&kd_trace_lock);
  226                 ml_set_interrupts_enabled(s);
  227                 return;
  228               }
  229           }
  230         kd = kd_bufptr;
  231         kd->debugid = debugid;
  232         kd->arg1 = arg1;
  233         kd->arg2 = arg2;
  234         kd->arg3 = arg3;
  235         kd->arg4 = arg4;
  236         kd->arg5 = (int)current_act();
  237         if (cpu_number())
  238             kd->arg5 |= KDBG_CPU_MASK;
  239                   
  240         now = kd->timestamp = mach_absolute_time();
  241 
  242         /* Watch for out of order timestamps */ 
  243 
  244         if (now < kd_prev_timebase)
  245           {
  246             kd->timestamp = ++kd_prev_timebase;
  247           }
  248         else
  249           {
   250             /* Otherwise, remember this timestamp as the new previous timebase */
  251             kd_prev_timebase = now;
  252           }
  253 
  254 
  255         kd_bufptr++;
  256 
  257         if (kd_bufptr >= kd_buflast)
  258                 kd_bufptr = kd_buffer;
  259         if (kd_bufptr == kd_readlast) {
  260                 if (kdebug_flags & KDBG_NOWRAP)
  261                         kdebug_nolog = 1;
  262                 kdebug_flags |= KDBG_WRAPPED;
  263         }
  264         usimple_unlock(&kd_trace_lock);
  265         ml_set_interrupts_enabled(s);
  266 }
  267 
  268 void
  269 kernel_debug1(debugid, arg1, arg2, arg3, arg4, arg5)
  270 unsigned int debugid, arg1, arg2, arg3, arg4, arg5;
  271 {
  272         kd_buf * kd;
  273         struct proc *curproc;
  274         int      s;
  275         unsigned long long now;
  276         mach_timespec_t *tsp;
  277 
  278         if (kdebug_enable & KDEBUG_ENABLE_CHUD) {
  279               if (kdebug_chudhook)
  280                     (void)kdebug_chudhook(debugid, arg1, arg2, arg3, arg4, arg5);
  281 
  282               if (!((kdebug_enable & KDEBUG_ENABLE_ENTROPY) ||
  283                     (kdebug_enable & KDEBUG_ENABLE_TRACE)))
  284                 return;
  285         }
  286 
  287         s = ml_set_interrupts_enabled(FALSE);
  288 
  289         if (kdebug_nolog)
  290           {
  291             ml_set_interrupts_enabled(s);
  292             return;
  293           }
  294 
  295         usimple_lock(&kd_trace_lock);
  296         if (kdebug_flags & KDBG_PIDCHECK)
  297           {
  298             /* If kdebug flag is not set for current proc, return  */
  299             curproc = current_proc();
  300             if ((curproc && !(curproc->p_flag & P_KDEBUG)) &&
  301                 ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
  302               {
  303                 usimple_unlock(&kd_trace_lock);
  304                 ml_set_interrupts_enabled(s);
  305                 return;
  306               }
  307           }
  308         else if (kdebug_flags & KDBG_PIDEXCLUDE)
  309           {
  310             /* If kdebug flag is set for current proc, return  */
  311             curproc = current_proc();
  312             if ((curproc && (curproc->p_flag & P_KDEBUG)) &&
  313                 ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)))
  314               {
  315                 usimple_unlock(&kd_trace_lock);
  316                 ml_set_interrupts_enabled(s);
  317                 return;
  318               }
  319           }
  320 
  321         if (kdebug_flags & KDBG_RANGECHECK)
  322           {
   323             if (((debugid < kdlog_beg) || (debugid > kdlog_end))
   324                 && ((debugid >> 24) != DBG_TRACE))
  325               {
  326                 usimple_unlock(&kd_trace_lock);
  327                 ml_set_interrupts_enabled(s);
  328                 return;
  329               }
  330           }
  331         else if (kdebug_flags & KDBG_VALCHECK)
  332           {
  333             if ((debugid & DBG_FUNC_MASK) != kdlog_value1 &&
  334                 (debugid & DBG_FUNC_MASK) != kdlog_value2 &&
  335                 (debugid & DBG_FUNC_MASK) != kdlog_value3 &&
  336                 (debugid & DBG_FUNC_MASK) != kdlog_value4 &&
  337                 (debugid >> 24 != DBG_TRACE))
  338               {
  339                 usimple_unlock(&kd_trace_lock);
  340                 ml_set_interrupts_enabled(s);
  341                 return;
  342               }
  343           }
  344 
  345         kd = kd_bufptr;
  346         kd->debugid = debugid;
  347         kd->arg1 = arg1;
  348         kd->arg2 = arg2;
  349         kd->arg3 = arg3;
  350         kd->arg4 = arg4;
  351         kd->arg5 = arg5;
  352         now = kd->timestamp = mach_absolute_time();
  353 
  354         /* Watch for out of order timestamps */ 
  355 
  356         if (now < kd_prev_timebase)
  357           {
  358             /* timestamps are out of order -- adjust */
  359             kd->timestamp = ++kd_prev_timebase;
  360           }
  361         else
  362           {
   363             /* Otherwise, remember this timestamp as the new previous timebase */
  364             kd_prev_timebase = now;
  365           }
  366 
  367         kd_bufptr++;
  368 
  369         if (kd_bufptr >= kd_buflast)
  370                 kd_bufptr = kd_buffer;
  371         if (kd_bufptr == kd_readlast) {
  372                 if (kdebug_flags & KDBG_NOWRAP)
  373                         kdebug_nolog = 1;
  374                 kdebug_flags |= KDBG_WRAPPED;
  375         }
  376         usimple_unlock(&kd_trace_lock);
  377         ml_set_interrupts_enabled(s);
  378 }
  379 
  380 
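       /*
        * kdbg_bootstrap() allocates the circular trace buffer (nkdbufs
        * entries of kd_buf), initializes the trace lock and the read/write
        * pointers, and marks the buffer ready via KDBG_INIT and KDBG_BUFINIT.
        * Returns 0 on success or EINVAL if the allocation fails.
        */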
   381 int kdbg_bootstrap()
  382 {
  383         kd_bufsize = nkdbufs * sizeof(kd_buf);
  384         if (kmem_alloc(kernel_map, &kd_buftomem,
  385                               (vm_size_t)kd_bufsize) == KERN_SUCCESS) 
  386         kd_buffer = (kd_buf *) kd_buftomem;
  387         else kd_buffer= (kd_buf *) 0;
  388         kdebug_flags &= ~KDBG_WRAPPED;
  389         if (kd_buffer) {
  390                 simple_lock_init(&kd_trace_lock);
  391                 kdebug_flags |= (KDBG_INIT | KDBG_BUFINIT);
  392                 kd_bufptr = kd_buffer;
  393                 kd_buflast = &kd_bufptr[nkdbufs];
  394                 kd_readlast = kd_bufptr;
  395                 kd_prev_timebase = 0LL;
  396                 return(0);
  397         } else {
  398                 kd_bufsize=0;
  399                 kdebug_flags &= ~(KDBG_INIT | KDBG_BUFINIT);
  400                 return(EINVAL);
  401         }
  402         
  403 }
  404 
   405 int kdbg_reinit()
  406 {
  407     int x;
  408     int ret=0;
  409 
  410     /* Disable trace collecting */
  411     kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
  412     kdebug_nolog = 1;
  413 
  414     if ((kdebug_flags & KDBG_INIT) && (kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer)
  415         kmem_free(kernel_map, (vm_offset_t)kd_buffer, kd_bufsize);
  416 
  417     if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
  418       {
  419         kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
  420         kdebug_flags &= ~KDBG_MAPINIT;
  421         kd_mapsize = 0;
  422         kd_mapptr = (kd_threadmap *) 0;
  423         kd_mapcount = 0;
  424       }  
  425 
  426     ret= kdbg_bootstrap();
  427 
  428     return(ret);
  429 }
  430 
  431 void kdbg_trace_data(struct proc *proc, long *arg_pid)
  432 {
  433     if (!proc)
  434         *arg_pid = 0;
  435     else
  436         *arg_pid = proc->p_pid;
  437     
  438     return;
  439 }
  440 
  441 
  442 void kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4)
  443 {
  444     int i;
  445     char *dbg_nameptr; 
  446     int dbg_namelen;
  447     long dbg_parms[4];
  448 
  449     if (!proc)
  450       {
  451         *arg1 = 0;
  452         *arg2 = 0;
  453         *arg3 = 0;
  454         *arg4 = 0;
  455         return;
  456       }
  457 
  458     /* Collect the pathname for tracing */
  459     dbg_nameptr = proc->p_comm;
  460     dbg_namelen = strlen(proc->p_comm);
  461     dbg_parms[0]=0L;
  462     dbg_parms[1]=0L;
  463     dbg_parms[2]=0L;
  464     dbg_parms[3]=0L;
  465   
  466     if(dbg_namelen > sizeof(dbg_parms))
  467       dbg_namelen = sizeof(dbg_parms);
  468     
  469     for(i=0;dbg_namelen > 0; i++)
  470       {
  471         dbg_parms[i]=*(long*)dbg_nameptr;
  472         dbg_nameptr += sizeof(long);
  473         dbg_namelen -= sizeof(long);
  474       }
  475 
  476     *arg1=dbg_parms[0];
  477     *arg2=dbg_parms[1];
  478     *arg3=dbg_parms[2];
  479     *arg4=dbg_parms[3];
  480 }
  481 
   482 void kdbg_resolve_map(thread_act_t th_act, krt_t *t)
  483 {
  484   kd_threadmap *mapptr;
  485 
  486   if(t->count < t->maxcount)
  487     {
  488       mapptr=&t->map[t->count];
  489       mapptr->thread  = (unsigned int)th_act;
  490       (void) strncpy (mapptr->command, t->atts->task_comm,
  491                       sizeof(t->atts->task_comm)-1);
  492       mapptr->command[sizeof(t->atts->task_comm)-1] = '\0';
  493 
  494       /*
  495         Some kernel threads have no associated pid.
  496         We still need to mark the entry as valid.
  497       */
  498       if (t->atts->pid)
  499           mapptr->valid = t->atts->pid;
  500       else
  501           mapptr->valid = 1;
  502 
  503       t->count++;
  504     }
  505 }
  506 
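       /*
        * kdbg_mapinit() builds the thread map used to resolve trace entries
        * back to processes: it walks allproc to size the tables (padding by
        * 10% to absorb processes created during the walk), snapshots each
        * live process's task, pid and command name into a temporary tts
        * array, then expands every task's activations into kd_threadmap
        * entries via kdbg_resolve_map().  The temporary array is freed here;
        * the map itself persists until kdbg_readmap(), kdbg_reinit() or
        * kdbg_clear() tears it down.
        */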
  507 void kdbg_mapinit()
  508 {
  509         struct proc *p;
  510         struct krt akrt;
  511         int tts_count;    /* number of task-to-string structures */
  512         struct tts *tts_mapptr;
  513         unsigned int tts_mapsize = 0;
  514         unsigned int tts_maptomem=0;
  515         int i;
  516 
  517 
  518         if (kdebug_flags & KDBG_MAPINIT)
  519           return;
  520 
  521         /* Calculate the sizes of map buffers*/
  522         for (p = allproc.lh_first, kd_mapcount=0, tts_count=0; p; 
  523              p = p->p_list.le_next)
  524           {
  525             kd_mapcount += get_task_numacts((task_t)p->task);
  526             tts_count++;
  527           }
  528 
  529         /*
  530          * The proc count could change during buffer allocation,
  531          * so introduce a small fudge factor to bump up the
  532          * buffer sizes. This gives new tasks some chance of 
   533          * making it into the tables.  Bump up by 10%.
  534          */
  535         kd_mapcount += kd_mapcount/10;
  536         tts_count += tts_count/10;
  537 
  538         kd_mapsize = kd_mapcount * sizeof(kd_threadmap);
  539         if((kmem_alloc(kernel_map, & kd_maptomem,
  540                        (vm_size_t)kd_mapsize) == KERN_SUCCESS))
  541         {
  542             kd_mapptr = (kd_threadmap *) kd_maptomem;
  543             bzero(kd_mapptr, kd_mapsize);
  544         }
  545         else
  546             kd_mapptr = (kd_threadmap *) 0;
  547 
  548         tts_mapsize = tts_count * sizeof(struct tts);
  549         if((kmem_alloc(kernel_map, & tts_maptomem,
  550                        (vm_size_t)tts_mapsize) == KERN_SUCCESS))
  551         {
  552             tts_mapptr = (struct tts *) tts_maptomem;
  553             bzero(tts_mapptr, tts_mapsize);
  554         }
  555         else
  556             tts_mapptr = (struct tts *) 0;
  557 
  558 
  559         /* 
   560          * We need to save the proc's command string
  561          * and take a reference for each task associated
  562          * with a valid process
  563          */
  564 
  565         if (tts_mapptr) {
  566                 for (p = allproc.lh_first, i=0; p && i < tts_count; 
  567                      p = p->p_list.le_next) {
  568                         if (p->p_flag & P_WEXIT)
  569                                 continue;
  570 
  571                         if (task_reference_try(p->task)) {
  572                                 tts_mapptr[i].task = p->task;
  573                                 tts_mapptr[i].pid  = p->p_pid;
   574                                 (void)strncpy(tts_mapptr[i].task_comm, p->p_comm, sizeof(tts_mapptr[i].task_comm) - 1);
  575                                 i++;
  576                         }
  577                 }
  578                 tts_count = i;
  579         }
  580 
  581 
  582         if (kd_mapptr && tts_mapptr)
  583           {
  584             kdebug_flags |= KDBG_MAPINIT;
  585             /* Initialize thread map data */
  586             akrt.map = kd_mapptr;
  587             akrt.count = 0;
  588             akrt.maxcount = kd_mapcount;
  589             
  590             for (i=0; i < tts_count; i++)
  591               {
  592                 akrt.atts = &tts_mapptr[i];
  593                 task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt);
  594                 task_deallocate((task_t) tts_mapptr[i].task);
  595               }
  596             kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize);
  597           }
  598 }
  599 
   600 void kdbg_clear()
  601 {
  602 int x;
  603 
  604         /* Clean up the trace buffer */ 
  605         global_state_pid = -1;
  606         kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
  607         kdebug_nolog = 1;
  608         kdebug_flags &= ~KDBG_BUFINIT;
  609         kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
  610         kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK);
  611         kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE);
  612         kmem_free(kernel_map, (vm_offset_t)kd_buffer, kd_bufsize);
  613         kd_buffer = (kd_buf *)0;
  614         kd_bufsize = 0;
  615         kd_prev_timebase = 0LL;
  616 
  617         /* Clean up the thread map buffer */
  618         kdebug_flags &= ~KDBG_MAPINIT;
  619         kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
  620         kd_mapptr = (kd_threadmap *) 0;
  621         kd_mapsize = 0;
  622         kd_mapcount = 0;
  623 }
  624 
   625 int kdbg_setpid(kd_regtype *kdr)
  626 {
  627   pid_t pid;
  628   int flag, ret=0;
  629   struct proc *p;
  630 
  631   pid = (pid_t)kdr->value1;
  632   flag = (int)kdr->value2;
  633 
  634   if (pid > 0)
  635     {
  636       if ((p = pfind(pid)) == NULL)
  637         ret = ESRCH;
  638       else
  639         {
  640           if (flag == 1)  /* turn on pid check for this and all pids */
  641             {
  642               kdebug_flags |= KDBG_PIDCHECK;
  643               kdebug_flags &= ~KDBG_PIDEXCLUDE;
  644               p->p_flag |= P_KDEBUG;
  645             }
  646           else  /* turn off pid check for this pid value */
  647             {
  648               /* Don't turn off all pid checking though */
  649               /* kdebug_flags &= ~KDBG_PIDCHECK;*/   
  650               p->p_flag &= ~P_KDEBUG;
  651             }
  652         }
  653     }
  654   else
  655     ret = EINVAL;
  656   return(ret);
  657 }
  658 
  659 /* This is for pid exclusion in the trace buffer */
   660 int kdbg_setpidex(kd_regtype *kdr)
  661 {
  662   pid_t pid;
  663   int flag, ret=0;
  664   struct proc *p;
  665 
  666   pid = (pid_t)kdr->value1;
  667   flag = (int)kdr->value2;
  668 
  669   if (pid > 0)
  670     {
  671       if ((p = pfind(pid)) == NULL)
  672         ret = ESRCH;
  673       else
  674         {
  675           if (flag == 1)  /* turn on pid exclusion */
  676             {
  677               kdebug_flags |= KDBG_PIDEXCLUDE;
  678               kdebug_flags &= ~KDBG_PIDCHECK;
  679               p->p_flag |= P_KDEBUG;
  680             }
  681           else  /* turn off pid exclusion for this pid value */
  682             {
  683               /* Don't turn off all pid exclusion though */
  684               /* kdebug_flags &= ~KDBG_PIDEXCLUDE;*/   
  685               p->p_flag &= ~P_KDEBUG;
  686             }
  687         }
  688     }
  689   else
  690     ret = EINVAL;
  691   return(ret);
  692 }
  693 
  694 /* This is for setting a minimum decrementer value */
   695 int kdbg_setrtcdec(kd_regtype *kdr)
  696 {
  697   int ret=0;
  698   natural_t decval;
  699 
  700   decval = (natural_t)kdr->value1;
  701 
  702   if (decval && decval < KDBG_MINRTCDEC)
  703       ret = EINVAL;
  704 #ifdef ppc
  705   else
  706       rtclock_decrementer_min = decval;
  707 #else
  708   else
  709     ret = EOPNOTSUPP;
  710 #endif /* ppc */
  711 
  712   return(ret);
  713 }
  714 
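       /*
        * kdbg_setreg() installs a debugid filter.  KDBG_CLASSTYPE shifts the
        * class bytes into the top 8 bits to form the [kdlog_beg, kdlog_end]
        * range; KDBG_SUBCLSTYPE narrows the range to a single class/subclass
        * pair.  For example, class 1 with subclass 0x40 yields
        * kdlog_beg = 0x01400000 and kdlog_end = 0x01410000.
        */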
   715 int kdbg_setreg(kd_regtype * kdr)
  716 {
  717         int i,j, ret=0;
  718         unsigned int val_1, val_2, val;
  719         switch (kdr->type) {
  720         
  721         case KDBG_CLASSTYPE :
  722                 val_1 = (kdr->value1 & 0xff);
  723                 val_2 = (kdr->value2 & 0xff);
  724                 kdlog_beg = (val_1<<24);
  725                 kdlog_end = (val_2<<24);
  726                 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
  727                 kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
  728                 kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
  729                 break;
  730         case KDBG_SUBCLSTYPE :
  731                 val_1 = (kdr->value1 & 0xff);
  732                 val_2 = (kdr->value2 & 0xff);
  733                 val = val_2 + 1;
  734                 kdlog_beg = ((val_1<<24) | (val_2 << 16));
  735                 kdlog_end = ((val_1<<24) | (val << 16));
  736                 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
  737                 kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
  738                 kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
  739                 break;
  740         case KDBG_RANGETYPE :
  741                 kdlog_beg = (kdr->value1);
  742                 kdlog_end = (kdr->value2);
  743                 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
  744                 kdebug_flags &= ~KDBG_VALCHECK;       /* Turn off specific value check  */
  745                 kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
  746                 break;
  747         case KDBG_VALCHECK:
  748                 kdlog_value1 = (kdr->value1);
  749                 kdlog_value2 = (kdr->value2);
  750                 kdlog_value3 = (kdr->value3);
  751                 kdlog_value4 = (kdr->value4);
  752                 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
  753                 kdebug_flags &= ~KDBG_RANGECHECK;    /* Turn off range check */
  754                 kdebug_flags |= KDBG_VALCHECK;       /* Turn on specific value check  */
  755                 break;
  756         case KDBG_TYPENONE :
  757                 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
  758                 kdlog_beg = 0;
  759                 kdlog_end = 0;
  760                 break;
  761         default :
  762                 ret = EINVAL;
  763                 break;
  764         }
  765         return(ret);
  766 }
  767 
   768 int kdbg_getreg(kd_regtype * kdr)
  769 {
  770         int i,j, ret=0;
  771         unsigned int val_1, val_2, val;
  772 #if 0   
  773         switch (kdr->type) {
  774         case KDBG_CLASSTYPE :
  775                 val_1 = (kdr->value1 & 0xff);
  776                 val_2 = val_1 + 1;
  777                 kdlog_beg = (val_1<<24);
  778                 kdlog_end = (val_2<<24);
  779                 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
  780                 kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE);
  781                 break;
  782         case KDBG_SUBCLSTYPE :
  783                 val_1 = (kdr->value1 & 0xff);
  784                 val_2 = (kdr->value2 & 0xff);
  785                 val = val_2 + 1;
  786                 kdlog_beg = ((val_1<<24) | (val_2 << 16));
  787                 kdlog_end = ((val_1<<24) | (val << 16));
  788                 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
  789                 kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE);
  790                 break;
  791         case KDBG_RANGETYPE :
  792                 kdlog_beg = (kdr->value1);
  793                 kdlog_end = (kdr->value2);
  794                 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
  795                 kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE);
  796                 break;
  797         case KDBG_TYPENONE :
  798                 kdebug_flags &= (unsigned int)~KDBG_CKTYPES;
  799                 kdlog_beg = 0;
  800                 kdlog_end = 0;
  801                 break;
  802         default :
  803                 ret = EINVAL;
  804                 break;
  805         }
  806 #endif /* 0 */
  807         return(EINVAL);
  808 }
  809 
  810 
  811 
   812 int kdbg_readmap(kd_threadmap *buffer, size_t *number)
  813 {
  814   int avail = *number;
  815   int ret = 0;
  816   int count = 0;
  817 
  818   count = avail/sizeof (kd_threadmap);
  819 
  820   if (count && (count <= kd_mapcount))
  821     {
  822       if((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
  823         {
  824           if (*number < kd_mapsize)
  825             ret=EINVAL;
  826           else
  827             {
  828               if (copyout(kd_mapptr, buffer, kd_mapsize))
  829                 ret=EINVAL;
  830             }
  831         }
  832       else
  833         ret=EINVAL;
  834     }
  835   else
  836     ret=EINVAL;
  837 
  838   if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr)
  839     {
  840       kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize);
  841       kdebug_flags &= ~KDBG_MAPINIT;
  842       kd_mapsize = 0;
  843       kd_mapptr = (kd_threadmap *) 0;
  844       kd_mapcount = 0;
  845     }  
  846 
  847   return(ret);
  848 }
  849 
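       /*
        * kdbg_getentropy() drives timestamp-based entropy collection for the
        * security server: it sizes kd_entropy_buffer from the caller's
        * buffer, sets KDEBUG_ENABLE_ENTROPY so that kernel_debug() starts
        * capturing mach_absolute_time() samples, sleeps for roughly
        * ms_timeout milliseconds (or until interrupted), then copies the
        * collected timestamps out and releases the buffer.
        */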
   850 int kdbg_getentropy (mach_timespec_t * buffer, size_t *number, int ms_timeout)
  851 {
  852   int avail = *number;
  853   int ret = 0;
  854   int count = 0;     /* The number of timestamp entries that will fill buffer */
  855 
  856   if (kd_entropy_buffer)
  857     return(EBUSY);
  858 
  859   kd_entropy_count = avail/sizeof(mach_timespec_t);
  860   kd_entropy_bufsize = kd_entropy_count * sizeof(mach_timespec_t);
  861   kd_entropy_indx = 0;
  862 
  863   /* Enforce maximum entropy entries here if needed */
  864 
  865   /* allocate entropy buffer */
  866   if (kmem_alloc(kernel_map, &kd_entropy_buftomem,
  867                  (vm_size_t)kd_entropy_bufsize) == KERN_SUCCESS)
  868     {
  869       kd_entropy_buffer = (uint64_t *) kd_entropy_buftomem;
  870     }
  871   else
  872     {
  873       kd_entropy_buffer = (uint64_t *) 0;
  874       kd_entropy_count = 0;
  875       kd_entropy_indx = 0;
  876       return (EINVAL);
  877     }
  878 
  879   if (ms_timeout < 10)
  880     ms_timeout = 10;
  881 
  882   /* Enable entropy sampling */
  883   kdebug_enable |= KDEBUG_ENABLE_ENTROPY;
  884 
  885   ret = tsleep (kdbg_getentropy, PRIBIO | PCATCH, "kd_entropy", (ms_timeout/(1000/HZ)));
  886 
  887   /* Disable entropy sampling */
  888   kdebug_enable &= ~KDEBUG_ENABLE_ENTROPY;
  889 
  890   *number = 0;
  891   ret = 0;
  892 
  893   if (kd_entropy_indx > 0)
  894     {
  895       /* copyout the buffer */
  896       if (copyout(kd_entropy_buffer, buffer, kd_entropy_indx * sizeof(mach_timespec_t)))
  897           ret = EINVAL;
  898       else
  899           *number = kd_entropy_indx;
  900     }
  901 
  902   /* Always cleanup */
  903   kd_entropy_count = 0;
  904   kd_entropy_indx = 0;
  905   kd_entropy_buftomem = 0;
  906   kmem_free(kernel_map, (vm_offset_t)kd_entropy_buffer, kd_entropy_bufsize);
  907   kd_entropy_buffer = (uint64_t *) 0;
  908   return(ret);
  909 }
  910 
  911 
  912 /*
  913  * This function is provided for the CHUD toolkit only.
  914  *    int val:
  915  *        zero disables kdebug_chudhook function call
  916  *        non-zero enables kdebug_chudhook function call
  917  *    char *fn:
  918  *        address of the enabled kdebug_chudhook function
  919 */
  920 
  921 void kdbg_control_chud(int val, void *fn)
  922 {
  923         if (val) {
  924                 /* enable chudhook */
  925                 kdebug_enable |= KDEBUG_ENABLE_CHUD;
  926                 kdebug_chudhook = fn;
  927         }
  928         else {
  929                 /* disable chudhook */
  930                 kdebug_enable &= ~KDEBUG_ENABLE_CHUD;
  931                 kdebug_chudhook = 0;
  932         }
  933 }
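       /*
        * A hypothetical CHUD-side registration, assuming my_chud_hook matches
        * the kd_chudhook_fn signature declared above:
        *
        *     kdbg_control_chud(1, (void *) my_chud_hook);    enable the hook
        *     kdbg_control_chud(0, (void *) 0);               disable the hook
        */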
  934 
  935         
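       /*
        * kdbg_control() is the backend for the kernel trace sysctl node.
        * KERN_KDGETBUF and KERN_KDGETENTROPY are handled as passive requests;
        * every other selector first claims exclusive ownership of the trace
        * facility through global_state_pid (a stale owner is displaced, a
        * live one causes EBUSY) and is then dispatched below.
        */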
   936 int kdbg_control(name, namelen, where, sizep)
  937 int *name;
  938 u_int namelen;
  939 char *where;
  940 size_t *sizep;
  941 {
  942 int ret=0;
  943 int size=*sizep;
  944 int max_entries;
  945 unsigned int value = name[1];
  946 kd_regtype kd_Reg;
  947 kbufinfo_t kd_bufinfo;
  948 
  949 pid_t curpid;
  950 struct proc *p, *curproc;
  951 
  952        if (name[0] == KERN_KDGETBUF) {
  953            /* 
  954               Does not alter the global_state_pid
  955               This is a passive request.
  956            */
  957            if (size < sizeof(kd_bufinfo.nkdbufs)) {
  958              /* 
  959                 There is not enough room to return even
  960                 the first element of the info structure.
  961              */
  962              return(EINVAL);
  963            }
  964 
  965            kd_bufinfo.nkdbufs = nkdbufs;
  966            kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap);
  967            kd_bufinfo.nolog = kdebug_nolog;
  968            kd_bufinfo.flags = kdebug_flags;
  969            kd_bufinfo.bufid = global_state_pid;
  970            
  971            if(size >= sizeof(kbufinfo_t)) {
  972              /* Provide all the info we have */
  973              if(copyout (&kd_bufinfo, where, sizeof(kbufinfo_t)))
  974                return(EINVAL);
  975            }
  976            else {
  977              /* 
  978                 For backwards compatibility, only provide
  979                 as much info as there is room for.
  980              */
  981              if(copyout (&kd_bufinfo, where, size))
  982                return(EINVAL);
  983            }
  984            return(0);
  985        }
  986        else if (name[0] == KERN_KDGETENTROPY) {
  987          if (kd_entropy_buffer)
  988            return(EBUSY);
  989          else
  990            ret = kdbg_getentropy((mach_timespec_t *)where, sizep, value);
  991          return (ret);
  992        }
  993 
   994         if ((curproc = current_proc()) != NULL)
  995           curpid = curproc->p_pid;
  996         else
  997           return (ESRCH);
  998 
  999         if (global_state_pid == -1)
 1000             global_state_pid = curpid;
 1001         else if (global_state_pid != curpid)
 1002           {
 1003             if((p = pfind(global_state_pid)) == NULL)
 1004               {
 1005                 /* The global pid no longer exists */
 1006                 global_state_pid = curpid;
 1007               }
 1008             else
 1009               {
 1010                 /* The global pid exists, deny this request */
 1011                 return(EBUSY);
 1012               }
 1013           }
 1014 
 1015         switch(name[0]) {
 1016                 case KERN_KDEFLAGS:
 1017                         value &= KDBG_USERFLAGS;
 1018                         kdebug_flags |= value;
 1019                         break;
 1020                 case KERN_KDDFLAGS:
 1021                         value &= KDBG_USERFLAGS;
 1022                         kdebug_flags &= ~value;
 1023                         break;
 1024                 case KERN_KDENABLE:    /* used to enable or disable */
 1025                   if (value)
 1026                     {
 1027                       /* enable only if buffer is initialized */
 1028                       if (!(kdebug_flags & KDBG_BUFINIT))
 1029                         {
 1030                           ret=EINVAL;
 1031                           break;
 1032                         }
 1033                     }
 1034 
 1035                   if (value)
 1036                     kdebug_enable |= KDEBUG_ENABLE_TRACE;
 1037                   else
 1038                     kdebug_enable &= ~KDEBUG_ENABLE_TRACE;
 1039 
 1040                   kdebug_nolog = (value)?0:1;
 1041 
 1042                   if (kdebug_enable & KDEBUG_ENABLE_TRACE)
 1043                       kdbg_mapinit();
 1044                   break;
 1045                 case KERN_KDSETBUF:
 1046                   /* We allow a maximum buffer size of 25% of either ram or max mapped address, whichever is smaller */
 1047                   /* 'value' is the desired number of trace entries */
 1048                         max_entries = (sane_size/4) / sizeof(kd_buf);
 1049                         if (value <= max_entries)
 1050                                 nkdbufs = value;
 1051                         else
 1052                           nkdbufs = max_entries;
 1053                         break;
 1054                 case KERN_KDSETUP:
 1055                         ret=kdbg_reinit();
 1056                         break;
 1057                 case KERN_KDREMOVE:
 1058                         kdbg_clear();
 1059                         break;
 1060                 case KERN_KDSETREG:
 1061                         if(size < sizeof(kd_regtype)) {
 1062                                 ret=EINVAL;
 1063                                 break;
 1064                         }
 1065                         if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
 1066                                 ret= EINVAL;
 1067                                 break;
 1068                         }
 1069                         ret = kdbg_setreg(&kd_Reg);
 1070                         break;
 1071                 case KERN_KDGETREG:
 1072                         if(size < sizeof(kd_regtype)) {
 1073                                 ret = EINVAL;
 1074                                 break;
 1075                         }
 1076                         ret = kdbg_getreg(&kd_Reg);
 1077                         if (copyout(&kd_Reg, where, sizeof(kd_regtype))){
 1078                                 ret=EINVAL;
 1079                         }
 1080                         break;
 1081                 case KERN_KDREADTR:
 1082                         ret = kdbg_read(where, sizep);
 1083                         break;
 1084                 case KERN_KDPIDTR:
 1085                         if (size < sizeof(kd_regtype)) {
 1086                                 ret = EINVAL;
 1087                                 break;
 1088                         }
 1089                         if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
 1090                                 ret= EINVAL;
 1091                                 break;
 1092                         }
 1093                         ret = kdbg_setpid(&kd_Reg);
 1094                         break;
 1095                 case KERN_KDPIDEX:
 1096                         if (size < sizeof(kd_regtype)) {
 1097                                 ret = EINVAL;
 1098                                 break;
 1099                         }
 1100                         if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
 1101                                 ret= EINVAL;
 1102                                 break;
 1103                         }
 1104                         ret = kdbg_setpidex(&kd_Reg);
 1105                         break;
 1106                 case KERN_KDTHRMAP:
 1107                         ret = kdbg_readmap((kd_threadmap *)where, sizep);
 1108                         break;
 1109                 case KERN_KDSETRTCDEC:
 1110                         if (size < sizeof(kd_regtype)) {
 1111                                 ret = EINVAL;
 1112                                 break;
 1113                         }
 1114                         if (copyin(where, &kd_Reg, sizeof(kd_regtype))) {
 1115                                 ret= EINVAL;
 1116                                 break;
 1117                         }
 1118                         ret = kdbg_setrtcdec(&kd_Reg);
 1119                         break;
 1120                        
 1121                 default:
 1122                         ret= EINVAL;
 1123         }
 1124         return(ret);
 1125 }
 1126 
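       /*
        * kdbg_read() copies accumulated trace entries out to user space.  It
        * snapshots kdebug_flags and the write pointer under the trace lock,
        * then copies either the contiguous unread region or, when the buffer
        * has wrapped, the tail of the buffer followed by the head, advancing
        * kd_readlast as it goes.  On return *number holds the count of kd_buf
        * entries actually delivered.
        */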
  1127 int kdbg_read(kd_buf * buffer, size_t *number)
 1128 {
 1129 int avail=*number;
 1130 int count=0;
 1131 int copycount=0;
 1132 int totalcount=0;
 1133 int s;
 1134 unsigned int my_kdebug_flags;
 1135 kd_buf * my_kd_bufptr;
 1136 
 1137         s = ml_set_interrupts_enabled(FALSE);
 1138         usimple_lock(&kd_trace_lock);
 1139         my_kdebug_flags = kdebug_flags;
 1140         my_kd_bufptr = kd_bufptr;
 1141         usimple_unlock(&kd_trace_lock);
 1142         ml_set_interrupts_enabled(s);
 1143 
 1144         count = avail/sizeof(kd_buf);
 1145         if (count) {
 1146                 if ((my_kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer) {
 1147                         if (count > nkdbufs)
 1148                                 count = nkdbufs;
 1149                         if (!(my_kdebug_flags & KDBG_WRAPPED) && (my_kd_bufptr > kd_readlast))
 1150                           {
 1151                             copycount = my_kd_bufptr-kd_readlast;
 1152                             if (copycount > count)
 1153                               copycount = count;
 1154 
 1155                             if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
 1156                               {
 1157                                 *number = 0;
 1158                                 return(EINVAL);
 1159                               }
 1160                             kd_readlast += copycount;
 1161                             *number = copycount;
 1162                             return(0);
 1163                           }
 1164                         else if (!(my_kdebug_flags & KDBG_WRAPPED) && (my_kd_bufptr == kd_readlast))
 1165                           {
 1166                             *number = 0;
 1167                             return(0);
 1168                           }
 1169                         else
 1170                           {
 1171                             if (my_kdebug_flags & KDBG_WRAPPED)
 1172                               {
 1173                                 kd_readlast = my_kd_bufptr;
 1174                                 kdebug_flags &= ~KDBG_WRAPPED;
 1175                               }
 1176 
 1177                             /* Note that by setting kd_readlast equal to my_kd_bufptr,
 1178                                we now treat the kd_buffer read the same as if we weren't
 1179                                wrapped and my_kd_bufptr was less than kd_readlast.
 1180                             */
 1181 
 1182                             /* first copyout from readlast to end of kd_buffer */
 1183                             copycount = kd_buflast - kd_readlast;
 1184                             if (copycount > count)
 1185                               copycount = count;
 1186                             if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
 1187                               {
 1188                                 *number = 0;
 1189                                 return(EINVAL);
 1190                               }
 1191                             buffer += copycount;
 1192                             count -= copycount;
 1193                             totalcount = copycount;
 1194                             kd_readlast += copycount;
 1195                             if (kd_readlast == kd_buflast)
 1196                               kd_readlast = kd_buffer;
 1197                             if (count == 0)
 1198                               {
 1199                                 *number = totalcount;
 1200                                 return(0);
 1201                               }
 1202 
 1203                              /* second copyout from top of kd_buffer to bufptr */
 1204                             copycount = my_kd_bufptr - kd_readlast;
 1205                             if (copycount > count)
 1206                               copycount = count;
 1207                             if (copycount == 0)
 1208                               {
 1209                                 *number = totalcount;
 1210                                 return(0);
 1211                               }
 1212                             if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf)))
 1213                               {
 1214                                 return(EINVAL);
 1215                               }
 1216                             kd_readlast += copycount;
 1217                             totalcount += copycount;
 1218                             *number = totalcount;
 1219                             return(0);
 1220                           }
 1221                 } /* end if KDBG_BUFINIT */             
 1222         } /* end if count */
 1223         return (EINVAL);
 1224 }
 1225 
 1226 unsigned char *getProcName(struct proc *proc);
 1227 unsigned char *getProcName(struct proc *proc) {
 1228 
 1229         return (unsigned char *) &proc->p_comm; /* Return pointer to the proc name */
 1230 
 1231 }

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.