FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_ktrace.c


    1 /*-
    2  * Copyright (c) 1989, 1993
    3  *      The Regents of the University of California.
    4  * Copyright (c) 2005 Robert N. M. Watson
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  * 4. Neither the name of the University nor the names of its contributors
   16  *    may be used to endorse or promote products derived from this software
   17  *    without specific prior written permission.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   29  * SUCH DAMAGE.
   30  *
   31  *      @(#)kern_ktrace.c       8.2 (Berkeley) 9/23/93
   32  */
   33 
   34 #include <sys/cdefs.h>
   35 __FBSDID("$FreeBSD$");
   36 
   37 #include "opt_ktrace.h"
   38 
   39 #include <sys/param.h>
   40 #include <sys/systm.h>
   41 #include <sys/fcntl.h>
   42 #include <sys/kernel.h>
   43 #include <sys/kthread.h>
   44 #include <sys/lock.h>
   45 #include <sys/mutex.h>
   46 #include <sys/malloc.h>
   47 #include <sys/mount.h>
   48 #include <sys/namei.h>
   49 #include <sys/priv.h>
   50 #include <sys/proc.h>
   51 #include <sys/unistd.h>
   52 #include <sys/vnode.h>
   53 #include <sys/socket.h>
   54 #include <sys/stat.h>
   55 #include <sys/ktrace.h>
   56 #include <sys/sx.h>
   57 #include <sys/sysctl.h>
   58 #include <sys/sysent.h>
   59 #include <sys/syslog.h>
   60 #include <sys/sysproto.h>
   61 
   62 #include <security/mac/mac_framework.h>
   63 
   64 /*
   65  * The ktrace facility allows the tracing of certain key events in user space
   66  * processes, such as system calls, signal delivery, context switches, and
    67  * user-generated events using utrace(2).  It works by streaming event
   68  * records and data to a vnode associated with the process using the
   69  * ktrace(2) system call.  In general, records can be written directly from
   70  * the context that generates the event.  One important exception to this is
   71  * during a context switch, where sleeping is not permitted.  To handle this
   72  * case, trace events are generated using in-kernel ktr_request records, and
    73  * then delivered to disk at a convenient moment -- either immediately,
    74  * at the next traceable event, at system call return, or at process exit.
   75  *
   76  * When dealing with multiple threads or processes writing to the same event
   77  * log, ordering guarantees are weak: specifically, if an event has multiple
    78  * records (e.g., system call enter and return), they may be interleaved with
    79  * records from another event.  Process and thread ID information is provided
    80  * in the record, and user applications can de-interleave events if required.
   81  */
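
       /*
        * A minimal userland sketch (an illustration, not part of this file)
        * of how the facility is driven, using the documented ktrace(2) and
        * utrace(2) interfaces; the output path and pid are placeholders:
        *
        *      #include <sys/param.h>
        *      #include <sys/time.h>
        *      #include <sys/uio.h>
        *      #include <sys/ktrace.h>
        *      #include <err.h>
        *
        *      // Stream syscall enter/return records for pid to the file.
        *      if (ktrace("ktrace.out", KTROP_SET,
        *          KTRFAC_SYSCALL | KTRFAC_SYSRET, pid) == -1)
        *              err(1, "ktrace");
        *
        *      // Emit an application-defined record into our own stream.
        *      (void)utrace("checkpoint", sizeof("checkpoint"));
        */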
   82 
   83 static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");
   84 
   85 #ifdef KTRACE
   86 
   87 #ifndef KTRACE_REQUEST_POOL
   88 #define KTRACE_REQUEST_POOL     100
   89 #endif
   90 
   91 struct ktr_request {
   92         struct  ktr_header ktr_header;
   93         void    *ktr_buffer;
   94         union {
   95                 struct  ktr_proc_ctor ktr_proc_ctor;
   96                 struct  ktr_syscall ktr_syscall;
   97                 struct  ktr_sysret ktr_sysret;
   98                 struct  ktr_genio ktr_genio;
   99                 struct  ktr_psig ktr_psig;
  100                 struct  ktr_csw ktr_csw;
  101                 struct  ktr_fault ktr_fault;
  102                 struct  ktr_faultend ktr_faultend;
  103         } ktr_data;
  104         STAILQ_ENTRY(ktr_request) ktr_list;
  105 };
  106 
  107 static int data_lengths[] = {
  108         0,                                      /* none */
  109         offsetof(struct ktr_syscall, ktr_args), /* KTR_SYSCALL */
  110         sizeof(struct ktr_sysret),              /* KTR_SYSRET */
  111         0,                                      /* KTR_NAMEI */
  112         sizeof(struct ktr_genio),               /* KTR_GENIO */
  113         sizeof(struct ktr_psig),                /* KTR_PSIG */
  114         sizeof(struct ktr_csw),                 /* KTR_CSW */
  115         0,                                      /* KTR_USER */
  116         0,                                      /* KTR_STRUCT */
  117         0,                                      /* KTR_SYSCTL */
  118         sizeof(struct ktr_proc_ctor),           /* KTR_PROCCTOR */
  119         0,                                      /* KTR_PROCDTOR */
  120         0,                                      /* unused */
  121         sizeof(struct ktr_fault),               /* KTR_FAULT */
  122         sizeof(struct ktr_faultend),            /* KTR_FAULTEND */
  123 };
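
       /*
        * On the trace vnode each record is laid out as the fixed ktr_header,
        * then data_lengths[type] bytes of the type-specific structure from
        * ktr_data, then any variable-length payload from ktr_buffer;
        * ktr_writerequest() below assembles exactly this iovec:
        *
        *      aiov[0] = header, aiov[1] = ktr_data, aiov[2] = ktr_buffer
        *
        * and bumps kth->ktr_len by datalen so the header's length field
        * covers both trailing pieces.
        */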
  124 
  125 static STAILQ_HEAD(, ktr_request) ktr_free;
  126 
  127 static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options");
  128 
  129 static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
  130 TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);
  131 
  132 static u_int ktr_geniosize = PAGE_SIZE;
  133 TUNABLE_INT("kern.ktrace.genio_size", &ktr_geniosize);
  134 SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RW, &ktr_geniosize,
  135     0, "Maximum size of genio event payload");
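
       /*
        * ktrgenio() below truncates each captured I/O payload to
        * ktr_geniosize bytes (one page by default); e.g.
        * "sysctl kern.ktrace.genio_size=65536" widens the slice of every
        * read/write that lands in the trace.
        */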
  136 
  137 static int print_message = 1;
  138 static struct mtx ktrace_mtx;
  139 static struct sx ktrace_sx;
  140 
  141 static void ktrace_init(void *dummy);
  142 static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
  143 static u_int ktrace_resize_pool(u_int oldsize, u_int newsize);
  144 static struct ktr_request *ktr_getrequest_entered(struct thread *td, int type);
  145 static struct ktr_request *ktr_getrequest(int type);
  146 static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
  147 static void ktr_freeproc(struct proc *p, struct ucred **uc,
  148     struct vnode **vp);
  149 static void ktr_freerequest(struct ktr_request *req);
  150 static void ktr_freerequest_locked(struct ktr_request *req);
  151 static void ktr_writerequest(struct thread *td, struct ktr_request *req);
  152 static int ktrcanset(struct thread *,struct proc *);
  153 static int ktrsetchildren(struct thread *,struct proc *,int,int,struct vnode *);
  154 static int ktrops(struct thread *,struct proc *,int,int,struct vnode *);
  155 static void ktrprocctor_entered(struct thread *, struct proc *);
  156 
  157 /*
  158  * ktrace itself generates events, such as context switches, which we do not
  159  * wish to trace.  Maintain a flag, TDP_INKTRACE, on each thread to determine
  160  * whether or not it is in a region where tracing of events should be
  161  * suppressed.
  162  */
  163 static void
  164 ktrace_enter(struct thread *td)
  165 {
  166 
  167         KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set"));
  168         td->td_pflags |= TDP_INKTRACE;
  169 }
  170 
  171 static void
  172 ktrace_exit(struct thread *td)
  173 {
  174 
  175         KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set"));
  176         td->td_pflags &= ~TDP_INKTRACE;
  177 }
  178 
  179 static void
  180 ktrace_assert(struct thread *td)
  181 {
  182 
  183         KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
  184 }
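
       /*
        * The canonical bracket, as used by ktr_getrequest() and
        * ktrprocexit() below:
        *
        *      ktrace_enter(td);
        *      req = ktr_getrequest_entered(td, type);
        *      ...
        *      ktrace_exit(td);
        *
        * Trace points hit while TDP_INKTRACE is set are suppressed by the
        * KTRPOINT() check, so ktrace's own I/O does not recurse into the
        * trace stream.
        */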
  185 
  186 static void
  187 ktrace_init(void *dummy)
  188 {
  189         struct ktr_request *req;
  190         int i;
  191 
  192         mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
  193         sx_init(&ktrace_sx, "ktrace_sx");
  194         STAILQ_INIT(&ktr_free);
  195         for (i = 0; i < ktr_requestpool; i++) {
  196                 req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK);
  197                 STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
  198         }
  199 }
  200 SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);
  201 
  202 static int
  203 sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
  204 {
  205         struct thread *td;
  206         u_int newsize, oldsize, wantsize;
  207         int error;
  208 
  209         /* Handle easy read-only case first to avoid warnings from GCC. */
  210         if (!req->newptr) {
  211                 oldsize = ktr_requestpool;
  212                 return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
  213         }
  214 
  215         error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
  216         if (error)
  217                 return (error);
  218         td = curthread;
  219         ktrace_enter(td);
  220         oldsize = ktr_requestpool;
  221         newsize = ktrace_resize_pool(oldsize, wantsize);
  222         ktrace_exit(td);
  223         error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
  224         if (error)
  225                 return (error);
  226         if (wantsize > oldsize && newsize < wantsize)
  227                 return (ENOSPC);
  228         return (0);
  229 }
  230 SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool, CTLTYPE_UINT|CTLFLAG_RW,
  231     &ktr_requestpool, 0, sysctl_kern_ktrace_request_pool, "IU", "");
  232 
  233 static u_int
  234 ktrace_resize_pool(u_int oldsize, u_int newsize)
  235 {
  236         STAILQ_HEAD(, ktr_request) ktr_new;
  237         struct ktr_request *req;
  238         int bound;
  239 
  240         print_message = 1;
  241         bound = newsize - oldsize;
  242         if (bound == 0)
  243                 return (ktr_requestpool);
  244         if (bound < 0) {
  245                 mtx_lock(&ktrace_mtx);
  246                 /* Shrink pool down to newsize if possible. */
  247                 while (bound++ < 0) {
  248                         req = STAILQ_FIRST(&ktr_free);
  249                         if (req == NULL)
  250                                 break;
  251                         STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
  252                         ktr_requestpool--;
  253                         free(req, M_KTRACE);
  254                 }
  255         } else {
  256                 /* Grow pool up to newsize. */
  257                 STAILQ_INIT(&ktr_new);
  258                 while (bound-- > 0) {
  259                         req = malloc(sizeof(struct ktr_request), M_KTRACE,
  260                             M_WAITOK);
  261                         STAILQ_INSERT_HEAD(&ktr_new, req, ktr_list);
  262                 }
  263                 mtx_lock(&ktrace_mtx);
  264                 STAILQ_CONCAT(&ktr_free, &ktr_new);
  265                 ktr_requestpool += (newsize - oldsize);
  266         }
  267         mtx_unlock(&ktrace_mtx);
  268         return (ktr_requestpool);
  269 }
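
       /*
        * The pool is resized at runtime through the handler above, e.g.:
        *
        *      # sysctl kern.ktrace.request_pool          (inspect)
        *      # sysctl kern.ktrace.request_pool=200      (resize)
        *
        * A shrink stops early once the free list is empty (requests that
        * are in flight are not reclaimed), and a grow that still falls
        * short of the requested size is reported back as ENOSPC.
        */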
  270 
  271 /* ktr_getrequest() assumes that ktr_comm[] is the same size as td_name[]. */
  272 CTASSERT(sizeof(((struct ktr_header *)NULL)->ktr_comm) ==
  273     (sizeof((struct thread *)NULL)->td_name));
  274 
  275 static struct ktr_request *
  276 ktr_getrequest_entered(struct thread *td, int type)
  277 {
  278         struct ktr_request *req;
  279         struct proc *p = td->td_proc;
  280         int pm;
  281 
  282         mtx_lock(&ktrace_mtx);
  283         if (!KTRCHECK(td, type)) {
  284                 mtx_unlock(&ktrace_mtx);
  285                 return (NULL);
  286         }
  287         req = STAILQ_FIRST(&ktr_free);
  288         if (req != NULL) {
  289                 STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
  290                 req->ktr_header.ktr_type = type;
  291                 if (p->p_traceflag & KTRFAC_DROP) {
  292                         req->ktr_header.ktr_type |= KTR_DROP;
  293                         p->p_traceflag &= ~KTRFAC_DROP;
  294                 }
  295                 mtx_unlock(&ktrace_mtx);
  296                 microtime(&req->ktr_header.ktr_time);
  297                 req->ktr_header.ktr_pid = p->p_pid;
  298                 req->ktr_header.ktr_tid = td->td_tid;
  299                 bcopy(td->td_name, req->ktr_header.ktr_comm,
  300                     sizeof(req->ktr_header.ktr_comm));
  301                 req->ktr_buffer = NULL;
  302                 req->ktr_header.ktr_len = 0;
  303         } else {
  304                 p->p_traceflag |= KTRFAC_DROP;
  305                 pm = print_message;
  306                 print_message = 0;
  307                 mtx_unlock(&ktrace_mtx);
  308                 if (pm)
  309                         printf("Out of ktrace request objects.\n");
  310         }
  311         return (req);
  312 }
  313 
  314 static struct ktr_request *
  315 ktr_getrequest(int type)
  316 {
  317         struct thread *td = curthread;
  318         struct ktr_request *req;
  319 
  320         ktrace_enter(td);
  321         req = ktr_getrequest_entered(td, type);
  322         if (req == NULL)
  323                 ktrace_exit(td);
  324 
  325         return (req);
  326 }
  327 
  328 /*
  329  * Some trace generation environments don't permit direct access to VFS,
  330  * such as during a context switch where sleeping is not allowed.  Under these
  331  * circumstances, queue a request to the thread to be written asynchronously
  332  * later.
  333  */
  334 static void
  335 ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
  336 {
  337 
  338         mtx_lock(&ktrace_mtx);
  339         STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
  340         mtx_unlock(&ktrace_mtx);
  341 }
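
       /*
        * Rough lifecycle of an asynchronous record, e.g. a KTR_CSW event
        * from ktrcsw() below: the generating context allocates a request
        * and queues it here without touching VFS; the queue is later
        * flushed by ktr_drain(), either from ktruserret() on the next
        * return to user mode, from ktr_submitrequest() just before the
        * next synchronous record, or from ktrprocexit().
        */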
  342 
  343 /*
  344  * Drain any pending ktrace records from the per-thread queue to disk.  This
  345  * is used both internally before committing other records, and also on
  346  * system call return.  We drain all the ones we can find at the time when
  347  * drain is requested, but don't keep draining after that as those events
  348  * may be approximately "after" the current event.
  349  */
  350 static void
  351 ktr_drain(struct thread *td)
  352 {
  353         struct ktr_request *queued_req;
  354         STAILQ_HEAD(, ktr_request) local_queue;
  355 
  356         ktrace_assert(td);
  357         sx_assert(&ktrace_sx, SX_XLOCKED);
  358 
  359         STAILQ_INIT(&local_queue);      /* XXXRW: needed? */
  360 
  361         if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
  362                 mtx_lock(&ktrace_mtx);
  363                 STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
  364                 mtx_unlock(&ktrace_mtx);
  365 
  366                 while ((queued_req = STAILQ_FIRST(&local_queue))) {
  367                         STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
  368                         ktr_writerequest(td, queued_req);
  369                         ktr_freerequest(queued_req);
  370                 }
  371         }
  372 }
  373 
  374 /*
  375  * Submit a trace record for immediate commit to disk -- to be used only
  376  * where entering VFS is OK.  First drain any pending records that may have
  377  * been cached in the thread.
  378  */
  379 static void
  380 ktr_submitrequest(struct thread *td, struct ktr_request *req)
  381 {
  382 
  383         ktrace_assert(td);
  384 
  385         sx_xlock(&ktrace_sx);
  386         ktr_drain(td);
  387         ktr_writerequest(td, req);
  388         ktr_freerequest(req);
  389         sx_xunlock(&ktrace_sx);
  390         ktrace_exit(td);
  391 }
  392 
  393 static void
  394 ktr_freerequest(struct ktr_request *req)
  395 {
  396 
  397         mtx_lock(&ktrace_mtx);
  398         ktr_freerequest_locked(req);
  399         mtx_unlock(&ktrace_mtx);
  400 }
  401 
  402 static void
  403 ktr_freerequest_locked(struct ktr_request *req)
  404 {
  405 
  406         mtx_assert(&ktrace_mtx, MA_OWNED);
  407         if (req->ktr_buffer != NULL)
  408                 free(req->ktr_buffer, M_KTRACE);
  409         STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
  410 }
  411 
  412 /*
  413  * Disable tracing for a process and release all associated resources.
  414  * The caller is responsible for releasing a reference on the returned
  415  * vnode and credentials.
  416  */
  417 static void
  418 ktr_freeproc(struct proc *p, struct ucred **uc, struct vnode **vp)
  419 {
  420         struct ktr_request *req;
  421 
  422         PROC_LOCK_ASSERT(p, MA_OWNED);
  423         mtx_assert(&ktrace_mtx, MA_OWNED);
  424         *uc = p->p_tracecred;
  425         p->p_tracecred = NULL;
  426         if (vp != NULL)
  427                 *vp = p->p_tracevp;
  428         p->p_tracevp = NULL;
  429         p->p_traceflag = 0;
  430         while ((req = STAILQ_FIRST(&p->p_ktr)) != NULL) {
  431                 STAILQ_REMOVE_HEAD(&p->p_ktr, ktr_list);
  432                 ktr_freerequest_locked(req);
  433         }
  434 }
  435 
  436 void
  437 ktrsyscall(code, narg, args)
  438         int code, narg;
  439         register_t args[];
  440 {
  441         struct ktr_request *req;
  442         struct ktr_syscall *ktp;
  443         size_t buflen;
  444         char *buf = NULL;
  445 
  446         buflen = sizeof(register_t) * narg;
  447         if (buflen > 0) {
  448                 buf = malloc(buflen, M_KTRACE, M_WAITOK);
  449                 bcopy(args, buf, buflen);
  450         }
  451         req = ktr_getrequest(KTR_SYSCALL);
  452         if (req == NULL) {
  453                 if (buf != NULL)
  454                         free(buf, M_KTRACE);
  455                 return;
  456         }
  457         ktp = &req->ktr_data.ktr_syscall;
  458         ktp->ktr_code = code;
  459         ktp->ktr_narg = narg;
  460         if (buflen > 0) {
  461                 req->ktr_header.ktr_len = buflen;
  462                 req->ktr_buffer = buf;
  463         }
  464         ktr_submitrequest(curthread, req);
  465 }
  466 
  467 void
  468 ktrsysret(code, error, retval)
  469         int code, error;
  470         register_t retval;
  471 {
  472         struct ktr_request *req;
  473         struct ktr_sysret *ktp;
  474 
  475         req = ktr_getrequest(KTR_SYSRET);
  476         if (req == NULL)
  477                 return;
  478         ktp = &req->ktr_data.ktr_sysret;
  479         ktp->ktr_code = code;
  480         ktp->ktr_error = error;
  481         ktp->ktr_retval = ((error == 0) ? retval: 0);           /* what about val2 ? */
  482         ktr_submitrequest(curthread, req);
  483 }
  484 
  485 /*
  486  * When a setuid process execs, disable tracing.
  487  *
  488  * XXX: We toss any pending asynchronous records.
  489  */
  490 void
  491 ktrprocexec(struct proc *p, struct ucred **uc, struct vnode **vp)
  492 {
  493 
  494         PROC_LOCK_ASSERT(p, MA_OWNED);
  495         mtx_lock(&ktrace_mtx);
  496         ktr_freeproc(p, uc, vp);
  497         mtx_unlock(&ktrace_mtx);
  498 }
  499 
  500 /*
  501  * When a process exits, drain per-process asynchronous trace records
  502  * and disable tracing.
  503  */
  504 void
  505 ktrprocexit(struct thread *td)
  506 {
  507         struct ktr_request *req;
  508         struct proc *p;
  509         struct ucred *cred;
  510         struct vnode *vp;
  511         int vfslocked;
  512 
  513         p = td->td_proc;
  514         if (p->p_traceflag == 0)
  515                 return;
  516 
  517         ktrace_enter(td);
  518         req = ktr_getrequest_entered(td, KTR_PROCDTOR);
  519         if (req != NULL)
  520                 ktr_enqueuerequest(td, req);
  521         sx_xlock(&ktrace_sx);
  522         ktr_drain(td);
  523         sx_xunlock(&ktrace_sx);
  524         PROC_LOCK(p);
  525         mtx_lock(&ktrace_mtx);
  526         ktr_freeproc(p, &cred, &vp);
  527         mtx_unlock(&ktrace_mtx);
  528         PROC_UNLOCK(p);
  529         if (vp != NULL) {
  530                 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
  531                 vrele(vp);
  532                 VFS_UNLOCK_GIANT(vfslocked);
  533         }
  534         if (cred != NULL)
  535                 crfree(cred);
  536         ktrace_exit(td);
  537 }
  538 
  539 static void
  540 ktrprocctor_entered(struct thread *td, struct proc *p)
  541 {
  542         struct ktr_proc_ctor *ktp;
  543         struct ktr_request *req;
  544         struct thread *td2;
  545 
  546         ktrace_assert(td);
  547         td2 = FIRST_THREAD_IN_PROC(p);
  548         req = ktr_getrequest_entered(td2, KTR_PROCCTOR);
  549         if (req == NULL)
  550                 return;
  551         ktp = &req->ktr_data.ktr_proc_ctor;
  552         ktp->sv_flags = p->p_sysent->sv_flags;
  553         ktr_enqueuerequest(td2, req);
  554 }
  555 
  556 void
  557 ktrprocctor(struct proc *p)
  558 {
  559         struct thread *td = curthread;
  560 
  561         if ((p->p_traceflag & KTRFAC_MASK) == 0)
  562                 return;
  563 
  564         ktrace_enter(td);
  565         ktrprocctor_entered(td, p);
  566         ktrace_exit(td);
  567 }
  568 
  569 /*
  570  * When a process forks, enable tracing in the new process if needed.
  571  */
  572 void
  573 ktrprocfork(struct proc *p1, struct proc *p2)
  574 {
  575 
  576         PROC_LOCK(p1);
  577         mtx_lock(&ktrace_mtx);
  578         KASSERT(p2->p_tracevp == NULL, ("new process has a ktrace vnode"));
  579         if (p1->p_traceflag & KTRFAC_INHERIT) {
  580                 p2->p_traceflag = p1->p_traceflag;
  581                 if ((p2->p_tracevp = p1->p_tracevp) != NULL) {
  582                         VREF(p2->p_tracevp);
  583                         KASSERT(p1->p_tracecred != NULL,
  584                             ("ktrace vnode with no cred"));
  585                         p2->p_tracecred = crhold(p1->p_tracecred);
  586                 }
  587         }
  588         mtx_unlock(&ktrace_mtx);
  589         PROC_UNLOCK(p1);
  590 
  591         ktrprocctor(p2);
  592 }
  593 
  594 /*
  595  * When a thread returns, drain any asynchronous records generated by the
  596  * system call.
  597  */
  598 void
  599 ktruserret(struct thread *td)
  600 {
  601 
  602         ktrace_enter(td);
  603         sx_xlock(&ktrace_sx);
  604         ktr_drain(td);
  605         sx_xunlock(&ktrace_sx);
  606         ktrace_exit(td);
  607 }
  608 
  609 void
  610 ktrnamei(path)
  611         char *path;
  612 {
  613         struct ktr_request *req;
  614         int namelen;
  615         char *buf = NULL;
  616 
  617         namelen = strlen(path);
  618         if (namelen > 0) {
  619                 buf = malloc(namelen, M_KTRACE, M_WAITOK);
  620                 bcopy(path, buf, namelen);
  621         }
  622         req = ktr_getrequest(KTR_NAMEI);
  623         if (req == NULL) {
  624                 if (buf != NULL)
  625                         free(buf, M_KTRACE);
  626                 return;
  627         }
  628         if (namelen > 0) {
  629                 req->ktr_header.ktr_len = namelen;
  630                 req->ktr_buffer = buf;
  631         }
  632         ktr_submitrequest(curthread, req);
  633 }
  634 
  635 void
  636 ktrsysctl(name, namelen)
  637         int *name;
  638         u_int namelen;
  639 {
  640         struct ktr_request *req;
  641         u_int mib[CTL_MAXNAME + 2];
  642         char *mibname;
  643         size_t mibnamelen;
  644         int error;
  645 
   646         /* Look up the name of the MIB. */
  647         KASSERT(namelen <= CTL_MAXNAME, ("sysctl MIB too long"));
  648         mib[0] = 0;
  649         mib[1] = 1;
  650         bcopy(name, mib + 2, namelen * sizeof(*name));
  651         mibnamelen = 128;
  652         mibname = malloc(mibnamelen, M_KTRACE, M_WAITOK);
  653         error = kernel_sysctl(curthread, mib, namelen + 2, mibname, &mibnamelen,
  654             NULL, 0, &mibnamelen, 0);
  655         if (error) {
  656                 free(mibname, M_KTRACE);
  657                 return;
  658         }
  659         req = ktr_getrequest(KTR_SYSCTL);
  660         if (req == NULL) {
  661                 free(mibname, M_KTRACE);
  662                 return;
  663         }
  664         req->ktr_header.ktr_len = mibnamelen;
  665         req->ktr_buffer = mibname;
  666         ktr_submitrequest(curthread, req);
  667 }
  668 
  669 void
  670 ktrgenio(fd, rw, uio, error)
  671         int fd;
  672         enum uio_rw rw;
  673         struct uio *uio;
  674         int error;
  675 {
  676         struct ktr_request *req;
  677         struct ktr_genio *ktg;
  678         int datalen;
  679         char *buf;
  680 
  681         if (error) {
  682                 free(uio, M_IOV);
  683                 return;
  684         }
  685         uio->uio_offset = 0;
  686         uio->uio_rw = UIO_WRITE;
  687         datalen = imin(uio->uio_resid, ktr_geniosize);
  688         buf = malloc(datalen, M_KTRACE, M_WAITOK);
  689         error = uiomove(buf, datalen, uio);
  690         free(uio, M_IOV);
  691         if (error) {
  692                 free(buf, M_KTRACE);
  693                 return;
  694         }
  695         req = ktr_getrequest(KTR_GENIO);
  696         if (req == NULL) {
  697                 free(buf, M_KTRACE);
  698                 return;
  699         }
  700         ktg = &req->ktr_data.ktr_genio;
  701         ktg->ktr_fd = fd;
  702         ktg->ktr_rw = rw;
  703         req->ktr_header.ktr_len = datalen;
  704         req->ktr_buffer = buf;
  705         ktr_submitrequest(curthread, req);
  706 }
  707 
  708 void
  709 ktrpsig(sig, action, mask, code)
  710         int sig;
  711         sig_t action;
  712         sigset_t *mask;
  713         int code;
  714 {
  715         struct thread *td = curthread;
  716         struct ktr_request *req;
  717         struct ktr_psig *kp;
  718 
  719         req = ktr_getrequest(KTR_PSIG);
  720         if (req == NULL)
  721                 return;
  722         kp = &req->ktr_data.ktr_psig;
  723         kp->signo = (char)sig;
  724         kp->action = action;
  725         kp->mask = *mask;
  726         kp->code = code;
  727         ktr_enqueuerequest(td, req);
  728         ktrace_exit(td);
  729 }
  730 
  731 void
  732 ktrcsw(out, user, wmesg)
  733         int out, user;
  734         const char *wmesg;
  735 {
  736         struct thread *td = curthread;
  737         struct ktr_request *req;
  738         struct ktr_csw *kc;
  739 
  740         req = ktr_getrequest(KTR_CSW);
  741         if (req == NULL)
  742                 return;
  743         kc = &req->ktr_data.ktr_csw;
  744         kc->out = out;
  745         kc->user = user;
  746         if (wmesg != NULL)
  747                 strlcpy(kc->wmesg, wmesg, sizeof(kc->wmesg));
  748         else
  749                 bzero(kc->wmesg, sizeof(kc->wmesg));
  750         ktr_enqueuerequest(td, req);
  751         ktrace_exit(td);
  752 }
  753 
  754 void
  755 ktrstruct(name, namelen, data, datalen)
  756         const char *name;
  757         size_t namelen;
  758         void *data;
  759         size_t datalen;
  760 {
  761         struct ktr_request *req;
  762         char *buf = NULL;
  763         size_t buflen;
  764 
  765         if (!data)
  766                 datalen = 0;
  767         buflen = namelen + 1 + datalen;
  768         buf = malloc(buflen, M_KTRACE, M_WAITOK);
  769         bcopy(name, buf, namelen);
  770         buf[namelen] = '\0';
  771         bcopy(data, buf + namelen + 1, datalen);
  772         if ((req = ktr_getrequest(KTR_STRUCT)) == NULL) {
  773                 free(buf, M_KTRACE);
  774                 return;
  775         }
  776         req->ktr_buffer = buf;
  777         req->ktr_header.ktr_len = buflen;
  778         ktr_submitrequest(curthread, req);
  779 }
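
       /*
        * The payload assembled above is the NUL-terminated structure name
        * followed immediately by the raw structure bytes:
        *
        *      +------+----+-----------------------+
        *      | name | \0 | datalen bytes of data |
        *      +------+----+-----------------------+
        *
        * Consumers such as kdump(1) use the name to choose a decoder for
        * the trailing bytes.
        */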
  780 
  781 void
  782 ktrfault(vaddr, type)
  783         vm_offset_t vaddr;
  784         int type;
  785 {
  786         struct thread *td = curthread;
  787         struct ktr_request *req;
  788         struct ktr_fault *kf;
  789 
  790         req = ktr_getrequest(KTR_FAULT);
  791         if (req == NULL)
  792                 return;
  793         kf = &req->ktr_data.ktr_fault;
  794         kf->vaddr = vaddr;
  795         kf->type = type;
  796         ktr_enqueuerequest(td, req);
  797         ktrace_exit(td);
  798 }
  799 
  800 void
  801 ktrfaultend(result)
  802         int result;
  803 {
  804         struct thread *td = curthread;
  805         struct ktr_request *req;
  806         struct ktr_faultend *kf;
  807 
  808         req = ktr_getrequest(KTR_FAULTEND);
  809         if (req == NULL)
  810                 return;
  811         kf = &req->ktr_data.ktr_faultend;
  812         kf->result = result;
  813         ktr_enqueuerequest(td, req);
  814         ktrace_exit(td);
  815 }
  816 #endif /* KTRACE */
  817 
  818 /* Interface and common routines */
  819 
  820 #ifndef _SYS_SYSPROTO_H_
  821 struct ktrace_args {
  822         char    *fname;
  823         int     ops;
  824         int     facs;
  825         int     pid;
  826 };
  827 #endif
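
       /*
        * An illustrative call from user space (the path and pid are
        * placeholders): uap->ops packs one operation with optional flags,
        * and uap->facs selects the trace points, e.g.
        *
        *      ktrace(path, KTROP_SET | KTRFLAG_DESCEND,
        *          KTRFAC_SYSCALL | KTRFAC_SYSRET, pid);
        *
        * KTROP() strips the flags to recover the operation, and
        * KTRFLAG_DESCEND applies the change to the whole process subtree
        * via ktrsetchildren().
        */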
  828 /* ARGSUSED */
  829 int
  830 ktrace(td, uap)
  831         struct thread *td;
  832         register struct ktrace_args *uap;
  833 {
  834 #ifdef KTRACE
  835         register struct vnode *vp = NULL;
  836         register struct proc *p;
  837         struct pgrp *pg;
  838         int facs = uap->facs & ~KTRFAC_ROOT;
  839         int ops = KTROP(uap->ops);
  840         int descend = uap->ops & KTRFLAG_DESCEND;
  841         int nfound, ret = 0;
  842         int flags, error = 0, vfslocked;
  843         struct nameidata nd;
  844         struct ucred *cred;
  845 
  846         /*
  847          * Need something to (un)trace.
  848          */
  849         if (ops != KTROP_CLEARFILE && facs == 0)
  850                 return (EINVAL);
  851 
  852         ktrace_enter(td);
  853         if (ops != KTROP_CLEAR) {
  854                 /*
   855          * An operation that requires a file argument.
  856                  */
  857                 NDINIT(&nd, LOOKUP, NOFOLLOW | MPSAFE, UIO_USERSPACE,
  858                     uap->fname, td);
  859                 flags = FREAD | FWRITE | O_NOFOLLOW;
  860                 error = vn_open(&nd, &flags, 0, NULL);
  861                 if (error) {
  862                         ktrace_exit(td);
  863                         return (error);
  864                 }
  865                 vfslocked = NDHASGIANT(&nd);
  866                 NDFREE(&nd, NDF_ONLY_PNBUF);
  867                 vp = nd.ni_vp;
  868                 VOP_UNLOCK(vp, 0);
  869                 if (vp->v_type != VREG) {
  870                         (void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
  871                         VFS_UNLOCK_GIANT(vfslocked);
  872                         ktrace_exit(td);
  873                         return (EACCES);
  874                 }
  875                 VFS_UNLOCK_GIANT(vfslocked);
  876         }
  877         /*
  878          * Clear all uses of the tracefile.
  879          */
  880         if (ops == KTROP_CLEARFILE) {
  881                 int vrele_count;
  882 
  883                 vrele_count = 0;
  884                 sx_slock(&allproc_lock);
  885                 FOREACH_PROC_IN_SYSTEM(p) {
  886                         PROC_LOCK(p);
  887                         if (p->p_tracevp == vp) {
  888                                 if (ktrcanset(td, p)) {
  889                                         mtx_lock(&ktrace_mtx);
  890                                         ktr_freeproc(p, &cred, NULL);
  891                                         mtx_unlock(&ktrace_mtx);
  892                                         vrele_count++;
  893                                         crfree(cred);
  894                                 } else
  895                                         error = EPERM;
  896                         }
  897                         PROC_UNLOCK(p);
  898                 }
  899                 sx_sunlock(&allproc_lock);
  900                 if (vrele_count > 0) {
  901                         vfslocked = VFS_LOCK_GIANT(vp->v_mount);
  902                         while (vrele_count-- > 0)
  903                                 vrele(vp);
  904                         VFS_UNLOCK_GIANT(vfslocked);
  905                 }
  906                 goto done;
  907         }
  908         /*
   909          * Apply the requested operation to the target process(es).
  910          */
  911         sx_slock(&proctree_lock);
  912         if (uap->pid < 0) {
  913                 /*
  914                  * by process group
  915                  */
  916                 pg = pgfind(-uap->pid);
  917                 if (pg == NULL) {
  918                         sx_sunlock(&proctree_lock);
  919                         error = ESRCH;
  920                         goto done;
  921                 }
  922                 /*
  923                  * ktrops() may call vrele(). Lock pg_members
  924                  * by the proctree_lock rather than pg_mtx.
  925                  */
  926                 PGRP_UNLOCK(pg);
  927                 nfound = 0;
  928                 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
  929                         PROC_LOCK(p);
  930                         if (p->p_state == PRS_NEW ||
  931                             p_cansee(td, p) != 0) {
  932                                 PROC_UNLOCK(p); 
  933                                 continue;
  934                         }
  935                         PROC_UNLOCK(p); 
  936                         nfound++;
  937                         if (descend)
  938                                 ret |= ktrsetchildren(td, p, ops, facs, vp);
  939                         else
  940                                 ret |= ktrops(td, p, ops, facs, vp);
  941                 }
  942                 if (nfound == 0) {
  943                         sx_sunlock(&proctree_lock);
  944                         error = ESRCH;
  945                         goto done;
  946                 }
  947         } else {
  948                 /*
  949                  * by pid
  950                  */
  951                 p = pfind(uap->pid);
  952                 if (p == NULL) {
  953                         sx_sunlock(&proctree_lock);
  954                         error = ESRCH;
  955                         goto done;
  956                 }
  957                 error = p_cansee(td, p);
  958                 /*
  959                  * The slock of the proctree lock will keep this process
  960                  * from going away, so unlocking the proc here is ok.
  961                  */
  962                 PROC_UNLOCK(p);
  963                 if (error) {
  964                         sx_sunlock(&proctree_lock);
  965                         goto done;
  966                 }
  967                 if (descend)
  968                         ret |= ktrsetchildren(td, p, ops, facs, vp);
  969                 else
  970                         ret |= ktrops(td, p, ops, facs, vp);
  971         }
  972         sx_sunlock(&proctree_lock);
  973         if (!ret)
  974                 error = EPERM;
  975 done:
  976         if (vp != NULL) {
  977                 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
  978                 (void) vn_close(vp, FWRITE, td->td_ucred, td);
  979                 VFS_UNLOCK_GIANT(vfslocked);
  980         }
  981         ktrace_exit(td);
  982         return (error);
  983 #else /* !KTRACE */
  984         return (ENOSYS);
  985 #endif /* KTRACE */
  986 }
  987 
  988 /* ARGSUSED */
  989 int
  990 utrace(td, uap)
  991         struct thread *td;
  992         register struct utrace_args *uap;
  993 {
  994 
  995 #ifdef KTRACE
  996         struct ktr_request *req;
  997         void *cp;
  998         int error;
  999 
 1000         if (!KTRPOINT(td, KTR_USER))
 1001                 return (0);
 1002         if (uap->len > KTR_USER_MAXLEN)
 1003                 return (EINVAL);
 1004         cp = malloc(uap->len, M_KTRACE, M_WAITOK);
 1005         error = copyin(uap->addr, cp, uap->len);
 1006         if (error) {
 1007                 free(cp, M_KTRACE);
 1008                 return (error);
 1009         }
 1010         req = ktr_getrequest(KTR_USER);
 1011         if (req == NULL) {
 1012                 free(cp, M_KTRACE);
 1013                 return (ENOMEM);
 1014         }
 1015         req->ktr_buffer = cp;
 1016         req->ktr_header.ktr_len = uap->len;
 1017         ktr_submitrequest(td, req);
 1018         return (0);
 1019 #else /* !KTRACE */
 1020         return (ENOSYS);
 1021 #endif /* KTRACE */
 1022 }
 1023 
 1024 #ifdef KTRACE
 1025 static int
 1026 ktrops(td, p, ops, facs, vp)
 1027         struct thread *td;
 1028         struct proc *p;
 1029         int ops, facs;
 1030         struct vnode *vp;
 1031 {
 1032         struct vnode *tracevp = NULL;
 1033         struct ucred *tracecred = NULL;
 1034 
 1035         PROC_LOCK(p);
 1036         if (!ktrcanset(td, p)) {
 1037                 PROC_UNLOCK(p);
 1038                 return (0);
 1039         }
 1040         mtx_lock(&ktrace_mtx);
 1041         if (ops == KTROP_SET) {
 1042                 if (p->p_tracevp != vp) {
 1043                         /*
 1044                          * if trace file already in use, relinquish below
 1045                          */
 1046                         tracevp = p->p_tracevp;
 1047                         VREF(vp);
 1048                         p->p_tracevp = vp;
 1049                 }
 1050                 if (p->p_tracecred != td->td_ucred) {
 1051                         tracecred = p->p_tracecred;
 1052                         p->p_tracecred = crhold(td->td_ucred);
 1053                 }
 1054                 p->p_traceflag |= facs;
 1055                 if (priv_check(td, PRIV_KTRACE) == 0)
 1056                         p->p_traceflag |= KTRFAC_ROOT;
 1057         } else {
 1058                 /* KTROP_CLEAR */
 1059                 if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0)
 1060                         /* no more tracing */
 1061                         ktr_freeproc(p, &tracecred, &tracevp);
 1062         }
 1063         mtx_unlock(&ktrace_mtx);
 1064         if ((p->p_traceflag & KTRFAC_MASK) != 0)
 1065                 ktrprocctor_entered(td, p);
 1066         PROC_UNLOCK(p);
 1067         if (tracevp != NULL) {
 1068                 int vfslocked;
 1069 
 1070                 vfslocked = VFS_LOCK_GIANT(tracevp->v_mount);
 1071                 vrele(tracevp);
 1072                 VFS_UNLOCK_GIANT(vfslocked);
 1073         }
 1074         if (tracecred != NULL)
 1075                 crfree(tracecred);
 1076 
 1077         return (1);
 1078 }
 1079 
 1080 static int
 1081 ktrsetchildren(td, top, ops, facs, vp)
 1082         struct thread *td;
 1083         struct proc *top;
 1084         int ops, facs;
 1085         struct vnode *vp;
 1086 {
 1087         register struct proc *p;
 1088         register int ret = 0;
 1089 
 1090         p = top;
 1091         sx_assert(&proctree_lock, SX_LOCKED);
 1092         for (;;) {
 1093                 ret |= ktrops(td, p, ops, facs, vp);
 1094                 /*
 1095                  * If this process has children, descend to them next,
 1096                  * otherwise do any siblings, and if done with this level,
 1097                  * follow back up the tree (but not past top).
 1098                  */
 1099                 if (!LIST_EMPTY(&p->p_children))
 1100                         p = LIST_FIRST(&p->p_children);
 1101                 else for (;;) {
 1102                         if (p == top)
 1103                                 return (ret);
 1104                         if (LIST_NEXT(p, p_sibling)) {
 1105                                 p = LIST_NEXT(p, p_sibling);
 1106                                 break;
 1107                         }
 1108                         p = p->p_pptr;
 1109                 }
 1110         }
 1111         /*NOTREACHED*/
 1112 }
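
       /*
        * Illustrative walk order on a hypothetical tree: for
        * top -> {A -> {C}, B} the loop visits top, A, C, B -- always
        * descending to a first child before taking siblings, then
        * climbing back toward top (but never past it).
        */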
 1113 
 1114 static void
 1115 ktr_writerequest(struct thread *td, struct ktr_request *req)
 1116 {
 1117         struct ktr_header *kth;
 1118         struct vnode *vp;
 1119         struct proc *p;
 1120         struct ucred *cred;
 1121         struct uio auio;
 1122         struct iovec aiov[3];
 1123         struct mount *mp;
 1124         int datalen, buflen, vrele_count;
 1125         int error, vfslocked;
 1126 
 1127         /*
 1128          * We hold the vnode and credential for use in I/O in case ktrace is
 1129          * disabled on the process as we write out the request.
 1130          *
 1131          * XXXRW: This is not ideal: we could end up performing a write after
 1132          * the vnode has been closed.
 1133          */
 1134         mtx_lock(&ktrace_mtx);
 1135         vp = td->td_proc->p_tracevp;
 1136         cred = td->td_proc->p_tracecred;
 1137 
 1138         /*
 1139          * If vp is NULL, the vp has been cleared out from under this
 1140          * request, so just drop it.  Make sure the credential and vnode are
 1141          * in sync: we should have both or neither.
 1142          */
 1143         if (vp == NULL) {
 1144                 KASSERT(cred == NULL, ("ktr_writerequest: cred != NULL"));
 1145                 mtx_unlock(&ktrace_mtx);
 1146                 return;
 1147         }
 1148         VREF(vp);
 1149         KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));
 1150         crhold(cred);
 1151         mtx_unlock(&ktrace_mtx);
 1152 
 1153         kth = &req->ktr_header;
 1154         KASSERT(((u_short)kth->ktr_type & ~KTR_DROP) <
 1155             sizeof(data_lengths) / sizeof(data_lengths[0]),
 1156             ("data_lengths array overflow"));
 1157         datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
 1158         buflen = kth->ktr_len;
 1159         auio.uio_iov = &aiov[0];
 1160         auio.uio_offset = 0;
 1161         auio.uio_segflg = UIO_SYSSPACE;
 1162         auio.uio_rw = UIO_WRITE;
 1163         aiov[0].iov_base = (caddr_t)kth;
 1164         aiov[0].iov_len = sizeof(struct ktr_header);
 1165         auio.uio_resid = sizeof(struct ktr_header);
 1166         auio.uio_iovcnt = 1;
 1167         auio.uio_td = td;
 1168         if (datalen != 0) {
 1169                 aiov[1].iov_base = (caddr_t)&req->ktr_data;
 1170                 aiov[1].iov_len = datalen;
 1171                 auio.uio_resid += datalen;
 1172                 auio.uio_iovcnt++;
 1173                 kth->ktr_len += datalen;
 1174         }
 1175         if (buflen != 0) {
 1176                 KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
 1177                 aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
 1178                 aiov[auio.uio_iovcnt].iov_len = buflen;
 1179                 auio.uio_resid += buflen;
 1180                 auio.uio_iovcnt++;
 1181         }
 1182 
 1183         vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 1184         vn_start_write(vp, &mp, V_WAIT);
 1185         vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 1186 #ifdef MAC
 1187         error = mac_vnode_check_write(cred, NOCRED, vp);
 1188         if (error == 0)
 1189 #endif
 1190                 error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
 1191         VOP_UNLOCK(vp, 0);
 1192         vn_finished_write(mp);
 1193         crfree(cred);
 1194         if (!error) {
 1195                 vrele(vp);
 1196                 VFS_UNLOCK_GIANT(vfslocked);
 1197                 return;
 1198         }
 1199         VFS_UNLOCK_GIANT(vfslocked);
 1200 
 1201         /*
  1202          * If an error is encountered, give up tracing on this vnode.  We defer
 1203          * all the vrele()'s on the vnode until after we are finished walking
 1204          * the various lists to avoid needlessly holding locks.
 1205          * NB: at this point we still hold the vnode reference that must
 1206          * not go away as we need the valid vnode to compare with. Thus let
 1207          * vrele_count start at 1 and the reference will be freed
 1208          * by the loop at the end after our last use of vp.
 1209          */
 1210         log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
 1211             error);
 1212         vrele_count = 1;
 1213         /*
 1214          * First, clear this vnode from being used by any processes in the
 1215          * system.
 1216          * XXX - If one process gets an EPERM writing to the vnode, should
 1217          * we really do this?  Other processes might have suitable
 1218          * credentials for the operation.
 1219          */
 1220         cred = NULL;
 1221         sx_slock(&allproc_lock);
 1222         FOREACH_PROC_IN_SYSTEM(p) {
 1223                 PROC_LOCK(p);
 1224                 if (p->p_tracevp == vp) {
 1225                         mtx_lock(&ktrace_mtx);
 1226                         ktr_freeproc(p, &cred, NULL);
 1227                         mtx_unlock(&ktrace_mtx);
 1228                         vrele_count++;
 1229                 }
 1230                 PROC_UNLOCK(p);
 1231                 if (cred != NULL) {
 1232                         crfree(cred);
 1233                         cred = NULL;
 1234                 }
 1235         }
 1236         sx_sunlock(&allproc_lock);
 1237 
 1238         vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 1239         while (vrele_count-- > 0)
 1240                 vrele(vp);
 1241         VFS_UNLOCK_GIANT(vfslocked);
 1242 }
 1243 
 1244 /*
 1245  * Return true if caller has permission to set the ktracing state
 1246  * of target.  Essentially, the target can't possess any
 1247  * more permissions than the caller.  KTRFAC_ROOT signifies that
 1248  * root previously set the tracing status on the target process, and
 1249  * so, only root may further change it.
 1250  */
 1251 static int
 1252 ktrcanset(td, targetp)
 1253         struct thread *td;
 1254         struct proc *targetp;
 1255 {
 1256 
 1257         PROC_LOCK_ASSERT(targetp, MA_OWNED);
 1258         if (targetp->p_traceflag & KTRFAC_ROOT &&
 1259             priv_check(td, PRIV_KTRACE))
 1260                 return (0);
 1261 
 1262         if (p_candebug(td, targetp) != 0)
 1263                 return (0);
 1264 
 1265         return (1);
 1266 }
 1267 
 1268 #endif /* KTRACE */



This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.