/*-
 * Copyright (c) 1989, 1993
 *      The Regents of the University of California.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)kern_ktrace.c       8.2 (Berkeley) 9/23/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.2/sys/kern/kern_ktrace.c 331722 2018-03-29 02:50:57Z eadler $");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/ktrace.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>

#include <security/mac/mac_framework.h>

/*
 * The ktrace facility allows the tracing of certain key events in user space
 * processes, such as system calls, signal delivery, context switches, and
 * user-generated events using utrace(2).  It works by streaming event
 * records and data to a vnode associated with the process using the
 * ktrace(2) system call.  In general, records can be written directly from
 * the context that generates the event.  One important exception to this is
 * during a context switch, where sleeping is not permitted.  To handle this
 * case, trace events are generated using in-kernel ktr_request records, and
 * then delivered to disk at a convenient moment -- either immediately, at
 * the next traceable event, at system call return, or at process exit.
 *
 * When dealing with multiple threads or processes writing to the same event
 * log, ordering guarantees are weak: specifically, if an event has multiple
 * records (e.g., system call enter and return), they may be interlaced with
 * records from another event.  Process and thread ID information is provided
 * in the record, and user applications can de-interlace events if required.
 */
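
/*
 * Example (illustrative, not part of the kernel): each record written to the
 * trace file is a struct ktr_header followed by ktr_len bytes of payload, so
 * a userland consumer can walk the stream and de-interlace it using the
 * (ktr_pid, ktr_tid) pair.  A minimal sketch, assuming the on-disk layout of
 * struct ktr_header from <sys/ktrace.h>:
 *
 *      #include <sys/ktrace.h>
 *      #include <stdio.h>
 *
 *      static void
 *      walk_records(FILE *fp)
 *      {
 *              struct ktr_header kth;
 *
 *              while (fread(&kth, sizeof(kth), 1, fp) == 1) {
 *                      printf("pid %d tid %ld %s: type %d, %d bytes%s\n",
 *                          (int)kth.ktr_pid, (long)kth.ktr_tid, kth.ktr_comm,
 *                          kth.ktr_type & ~KTR_DROP, kth.ktr_len,
 *                          (kth.ktr_type & KTR_DROP) ?
 *                          " [events dropped]" : "");
 *                      fseek(fp, kth.ktr_len, SEEK_CUR);
 *              }
 *      }
 *
 * Grouping records on (ktr_pid, ktr_tid) recovers a per-thread ordering even
 * when records from different threads are interlaced in the file.
 */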

static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE

FEATURE(ktrace, "Kernel support for system-call tracing");

#ifndef KTRACE_REQUEST_POOL
#define KTRACE_REQUEST_POOL     100
#endif

struct ktr_request {
        struct  ktr_header ktr_header;
        void    *ktr_buffer;
        union {
                struct  ktr_proc_ctor ktr_proc_ctor;
                struct  ktr_cap_fail ktr_cap_fail;
                struct  ktr_syscall ktr_syscall;
                struct  ktr_sysret ktr_sysret;
                struct  ktr_genio ktr_genio;
                struct  ktr_psig ktr_psig;
                struct  ktr_csw ktr_csw;
                struct  ktr_fault ktr_fault;
                struct  ktr_faultend ktr_faultend;
                struct  ktr_struct_array ktr_struct_array;
        } ktr_data;
        STAILQ_ENTRY(ktr_request) ktr_list;
};

static int data_lengths[] = {
        [KTR_SYSCALL] = offsetof(struct ktr_syscall, ktr_args),
        [KTR_SYSRET] = sizeof(struct ktr_sysret),
        [KTR_NAMEI] = 0,
        [KTR_GENIO] = sizeof(struct ktr_genio),
        [KTR_PSIG] = sizeof(struct ktr_psig),
        [KTR_CSW] = sizeof(struct ktr_csw),
        [KTR_USER] = 0,
        [KTR_STRUCT] = 0,
        [KTR_SYSCTL] = 0,
        [KTR_PROCCTOR] = sizeof(struct ktr_proc_ctor),
        [KTR_PROCDTOR] = 0,
        [KTR_CAPFAIL] = sizeof(struct ktr_cap_fail),
        [KTR_FAULT] = sizeof(struct ktr_fault),
        [KTR_FAULTEND] = sizeof(struct ktr_faultend),
        [KTR_STRUCT_ARRAY] = sizeof(struct ktr_struct_array),
};

static STAILQ_HEAD(, ktr_request) ktr_free;

static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options");

static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);

u_int ktr_geniosize = PAGE_SIZE;
SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RWTUN, &ktr_geniosize,
    0, "Maximum size of genio event payload");

static int print_message = 1;
static struct mtx ktrace_mtx;
static struct sx ktrace_sx;

static void ktrace_init(void *dummy);
static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
static u_int ktrace_resize_pool(u_int oldsize, u_int newsize);
static struct ktr_request *ktr_getrequest_entered(struct thread *td, int type);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
static void ktr_freeproc(struct proc *p, struct ucred **uc,
    struct vnode **vp);
static void ktr_freerequest(struct ktr_request *req);
static void ktr_freerequest_locked(struct ktr_request *req);
static void ktr_writerequest(struct thread *td, struct ktr_request *req);
static int ktrcanset(struct thread *,struct proc *);
static int ktrsetchildren(struct thread *,struct proc *,int,int,struct vnode *);
static int ktrops(struct thread *,struct proc *,int,int,struct vnode *);
static void ktrprocctor_entered(struct thread *, struct proc *);

/*
 * ktrace itself generates events, such as context switches, which we do not
 * wish to trace.  Maintain a flag, TDP_INKTRACE, on each thread to determine
 * whether or not it is in a region where tracing of events should be
 * suppressed.
 */
static void
ktrace_enter(struct thread *td)
{

        KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set"));
        td->td_pflags |= TDP_INKTRACE;
}

static void
ktrace_exit(struct thread *td)
{

        KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set"));
        td->td_pflags &= ~TDP_INKTRACE;
}

static void
ktrace_assert(struct thread *td)
{

        KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
}
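
/*
 * For reference (a paraphrase, not the authoritative definitions): the check
 * macros in <sys/ktrace.h> combine the per-process facility mask with this
 * per-thread flag, approximately as
 *
 *      KTRCHECK(td, type)      is the bit for 'type' set in
 *                              td->td_proc->p_traceflag?
 *      KTRPOINT(td, type)      KTRCHECK() succeeds and TDP_INKTRACE is
 *                              clear, i.e., tracing is wanted and the thread
 *                              is not already inside the ktrace facility.
 */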

static void
ktrace_init(void *dummy)
{
        struct ktr_request *req;
        int i;

        mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
        sx_init(&ktrace_sx, "ktrace_sx");
        STAILQ_INIT(&ktr_free);
        for (i = 0; i < ktr_requestpool; i++) {
                req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK);
                STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
        }
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);

static int
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
{
        struct thread *td;
        u_int newsize, oldsize, wantsize;
        int error;

        /* Handle easy read-only case first to avoid warnings from GCC. */
        if (!req->newptr) {
                oldsize = ktr_requestpool;
                return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
        }

        error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
        if (error)
                return (error);
        td = curthread;
        ktrace_enter(td);
        oldsize = ktr_requestpool;
        newsize = ktrace_resize_pool(oldsize, wantsize);
        ktrace_exit(td);
        error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
        if (error)
                return (error);
        if (wantsize > oldsize && newsize < wantsize)
                return (ENOSPC);
        return (0);
}
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool, CTLTYPE_UINT|CTLFLAG_RW,
    &ktr_requestpool, 0, sysctl_kern_ktrace_request_pool, "IU",
    "Pool buffer size for ktrace(1)");
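
/*
 * The pool size can be preset from the loader or adjusted at runtime, for
 * example:
 *
 *      kern.ktrace.request_pool="200"          (in /boot/loader.conf)
 *      sysctl kern.ktrace.request_pool=200     (on a running system)
 *
 * If a requested increase cannot be fully satisfied, the handler above
 * returns ENOSPC.
 */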

static u_int
ktrace_resize_pool(u_int oldsize, u_int newsize)
{
        STAILQ_HEAD(, ktr_request) ktr_new;
        struct ktr_request *req;
        int bound;

        print_message = 1;
        bound = newsize - oldsize;
        if (bound == 0)
                return (ktr_requestpool);
        if (bound < 0) {
                mtx_lock(&ktrace_mtx);
                /* Shrink pool down to newsize if possible. */
                while (bound++ < 0) {
                        req = STAILQ_FIRST(&ktr_free);
                        if (req == NULL)
                                break;
                        STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
                        ktr_requestpool--;
                        free(req, M_KTRACE);
                }
        } else {
                /* Grow pool up to newsize. */
                STAILQ_INIT(&ktr_new);
                while (bound-- > 0) {
                        req = malloc(sizeof(struct ktr_request), M_KTRACE,
                            M_WAITOK);
                        STAILQ_INSERT_HEAD(&ktr_new, req, ktr_list);
                }
                mtx_lock(&ktrace_mtx);
                STAILQ_CONCAT(&ktr_free, &ktr_new);
                ktr_requestpool += (newsize - oldsize);
        }
        mtx_unlock(&ktrace_mtx);
        return (ktr_requestpool);
}

/* ktr_getrequest() assumes that ktr_comm[] is the same size as td_name[]. */
CTASSERT(sizeof(((struct ktr_header *)NULL)->ktr_comm) ==
    (sizeof((struct thread *)NULL)->td_name));

static struct ktr_request *
ktr_getrequest_entered(struct thread *td, int type)
{
        struct ktr_request *req;
        struct proc *p = td->td_proc;
        int pm;

        mtx_lock(&ktrace_mtx);
        if (!KTRCHECK(td, type)) {
                mtx_unlock(&ktrace_mtx);
                return (NULL);
        }
        req = STAILQ_FIRST(&ktr_free);
        if (req != NULL) {
                STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
                req->ktr_header.ktr_type = type;
                if (p->p_traceflag & KTRFAC_DROP) {
                        req->ktr_header.ktr_type |= KTR_DROP;
                        p->p_traceflag &= ~KTRFAC_DROP;
                }
                mtx_unlock(&ktrace_mtx);
                microtime(&req->ktr_header.ktr_time);
                req->ktr_header.ktr_pid = p->p_pid;
                req->ktr_header.ktr_tid = td->td_tid;
                bcopy(td->td_name, req->ktr_header.ktr_comm,
                    sizeof(req->ktr_header.ktr_comm));
                req->ktr_buffer = NULL;
                req->ktr_header.ktr_len = 0;
        } else {
                p->p_traceflag |= KTRFAC_DROP;
                pm = print_message;
                print_message = 0;
                mtx_unlock(&ktrace_mtx);
                if (pm)
                        printf("Out of ktrace request objects.\n");
        }
        return (req);
}

static struct ktr_request *
ktr_getrequest(int type)
{
        struct thread *td = curthread;
        struct ktr_request *req;

        ktrace_enter(td);
        req = ktr_getrequest_entered(td, type);
        if (req == NULL)
                ktrace_exit(td);

        return (req);
}

/*
 * Some trace generation environments don't permit direct access to VFS,
 * such as during a context switch where sleeping is not allowed.  Under these
 * circumstances, queue the request on the process (p_ktr) so that it can be
 * written asynchronously later.
 */
static void
ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
{

        mtx_lock(&ktrace_mtx);
        STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
        mtx_unlock(&ktrace_mtx);
}

/*
 * Drain any pending ktrace records from the per-process queue to disk.  This
 * is used both internally before committing other records, and also on
 * system call return.  We drain all the records found when the drain is
 * requested, but don't keep draining after that, as later events may be
 * approximately "after" the current event.
 */
static void
ktr_drain(struct thread *td)
{
        struct ktr_request *queued_req;
        STAILQ_HEAD(, ktr_request) local_queue;

        ktrace_assert(td);
        sx_assert(&ktrace_sx, SX_XLOCKED);

        STAILQ_INIT(&local_queue);

        if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
                mtx_lock(&ktrace_mtx);
                STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
                mtx_unlock(&ktrace_mtx);

                while ((queued_req = STAILQ_FIRST(&local_queue))) {
                        STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
                        ktr_writerequest(td, queued_req);
                        ktr_freerequest(queued_req);
                }
        }
}

/*
 * Submit a trace record for immediate commit to disk -- to be used only
 * where entering VFS is OK.  First drain any pending records that may have
 * been cached in the thread.
 */
static void
ktr_submitrequest(struct thread *td, struct ktr_request *req)
{

        ktrace_assert(td);

        sx_xlock(&ktrace_sx);
        ktr_drain(td);
        ktr_writerequest(td, req);
        ktr_freerequest(req);
        sx_xunlock(&ktrace_sx);
        ktrace_exit(td);
}

static void
ktr_freerequest(struct ktr_request *req)
{

        mtx_lock(&ktrace_mtx);
        ktr_freerequest_locked(req);
        mtx_unlock(&ktrace_mtx);
}

static void
ktr_freerequest_locked(struct ktr_request *req)
{

        mtx_assert(&ktrace_mtx, MA_OWNED);
        if (req->ktr_buffer != NULL)
                free(req->ktr_buffer, M_KTRACE);
        STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
}

/*
 * Disable tracing for a process and release all associated resources.
 * The caller is responsible for releasing a reference on the returned
 * vnode and credentials.
 */
static void
ktr_freeproc(struct proc *p, struct ucred **uc, struct vnode **vp)
{
        struct ktr_request *req;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        mtx_assert(&ktrace_mtx, MA_OWNED);
        *uc = p->p_tracecred;
        p->p_tracecred = NULL;
        if (vp != NULL)
                *vp = p->p_tracevp;
        p->p_tracevp = NULL;
        p->p_traceflag = 0;
        while ((req = STAILQ_FIRST(&p->p_ktr)) != NULL) {
                STAILQ_REMOVE_HEAD(&p->p_ktr, ktr_list);
                ktr_freerequest_locked(req);
        }
}

void
ktrsyscall(int code, int narg, register_t args[])
{
        struct ktr_request *req;
        struct ktr_syscall *ktp;
        size_t buflen;
        char *buf = NULL;

        buflen = sizeof(register_t) * narg;
        if (buflen > 0) {
                buf = malloc(buflen, M_KTRACE, M_WAITOK);
                bcopy(args, buf, buflen);
        }
        req = ktr_getrequest(KTR_SYSCALL);
        if (req == NULL) {
                if (buf != NULL)
                        free(buf, M_KTRACE);
                return;
        }
        ktp = &req->ktr_data.ktr_syscall;
        ktp->ktr_code = code;
        ktp->ktr_narg = narg;
        if (buflen > 0) {
                req->ktr_header.ktr_len = buflen;
                req->ktr_buffer = buf;
        }
        ktr_submitrequest(curthread, req);
}

void
ktrsysret(int code, int error, register_t retval)
{
        struct ktr_request *req;
        struct ktr_sysret *ktp;

        req = ktr_getrequest(KTR_SYSRET);
        if (req == NULL)
                return;
        ktp = &req->ktr_data.ktr_sysret;
        ktp->ktr_code = code;
        ktp->ktr_error = error;
        ktp->ktr_retval = ((error == 0) ? retval: 0);           /* what about val2 ? */
        ktr_submitrequest(curthread, req);
}

/*
 * When a setuid process execs, disable tracing.
 *
 * XXX: We toss any pending asynchronous records.
 */
void
ktrprocexec(struct proc *p, struct ucred **uc, struct vnode **vp)
{

        PROC_LOCK_ASSERT(p, MA_OWNED);
        mtx_lock(&ktrace_mtx);
        ktr_freeproc(p, uc, vp);
        mtx_unlock(&ktrace_mtx);
}

/*
 * When a process exits, drain per-process asynchronous trace records
 * and disable tracing.
 */
void
ktrprocexit(struct thread *td)
{
        struct ktr_request *req;
        struct proc *p;
        struct ucred *cred;
        struct vnode *vp;

        p = td->td_proc;
        if (p->p_traceflag == 0)
                return;

        ktrace_enter(td);
        req = ktr_getrequest_entered(td, KTR_PROCDTOR);
        if (req != NULL)
                ktr_enqueuerequest(td, req);
        sx_xlock(&ktrace_sx);
        ktr_drain(td);
        sx_xunlock(&ktrace_sx);
        PROC_LOCK(p);
        mtx_lock(&ktrace_mtx);
        ktr_freeproc(p, &cred, &vp);
        mtx_unlock(&ktrace_mtx);
        PROC_UNLOCK(p);
        if (vp != NULL)
                vrele(vp);
        if (cred != NULL)
                crfree(cred);
        ktrace_exit(td);
}

static void
ktrprocctor_entered(struct thread *td, struct proc *p)
{
        struct ktr_proc_ctor *ktp;
        struct ktr_request *req;
        struct thread *td2;

        ktrace_assert(td);
        td2 = FIRST_THREAD_IN_PROC(p);
        req = ktr_getrequest_entered(td2, KTR_PROCCTOR);
        if (req == NULL)
                return;
        ktp = &req->ktr_data.ktr_proc_ctor;
        ktp->sv_flags = p->p_sysent->sv_flags;
        ktr_enqueuerequest(td2, req);
}

void
ktrprocctor(struct proc *p)
{
        struct thread *td = curthread;

        if ((p->p_traceflag & KTRFAC_MASK) == 0)
                return;

        ktrace_enter(td);
        ktrprocctor_entered(td, p);
        ktrace_exit(td);
}

/*
 * When a process forks, enable tracing in the new process if needed.
 */
void
ktrprocfork(struct proc *p1, struct proc *p2)
{

        MPASS(p2->p_tracevp == NULL);
        MPASS(p2->p_traceflag == 0);

        if (p1->p_traceflag == 0)
                return;

        PROC_LOCK(p1);
        mtx_lock(&ktrace_mtx);
        if (p1->p_traceflag & KTRFAC_INHERIT) {
                p2->p_traceflag = p1->p_traceflag;
                if ((p2->p_tracevp = p1->p_tracevp) != NULL) {
                        VREF(p2->p_tracevp);
                        KASSERT(p1->p_tracecred != NULL,
                            ("ktrace vnode with no cred"));
                        p2->p_tracecred = crhold(p1->p_tracecred);
                }
        }
        mtx_unlock(&ktrace_mtx);
        PROC_UNLOCK(p1);

        ktrprocctor(p2);
}
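
/*
 * Example: KTRFAC_INHERIT is what lets "ktrace -i" follow children across
 * fork(); when it is set, the child starts with a copy of the parent's
 * trace flags, a new reference on the trace vnode, and a hold on the
 * tracing credential, and then logs its own KTR_PROCCTOR record.
 */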

/*
 * When a thread returns, drain any asynchronous records generated by the
 * system call.
 */
void
ktruserret(struct thread *td)
{

        ktrace_enter(td);
        sx_xlock(&ktrace_sx);
        ktr_drain(td);
        sx_xunlock(&ktrace_sx);
        ktrace_exit(td);
}

void
ktrnamei(char *path)
{
        struct ktr_request *req;
        int namelen;
        char *buf = NULL;

        namelen = strlen(path);
        if (namelen > 0) {
                buf = malloc(namelen, M_KTRACE, M_WAITOK);
                bcopy(path, buf, namelen);
        }
        req = ktr_getrequest(KTR_NAMEI);
        if (req == NULL) {
                if (buf != NULL)
                        free(buf, M_KTRACE);
                return;
        }
        if (namelen > 0) {
                req->ktr_header.ktr_len = namelen;
                req->ktr_buffer = buf;
        }
        ktr_submitrequest(curthread, req);
}

void
ktrsysctl(int *name, u_int namelen)
{
        struct ktr_request *req;
        u_int mib[CTL_MAXNAME + 2];
        char *mibname;
        size_t mibnamelen;
        int error;

        /* Lookup name of mib. */
        KASSERT(namelen <= CTL_MAXNAME, ("sysctl MIB too long"));
        mib[0] = 0;
        mib[1] = 1;
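        /*
         * The {0,1} prefix is the sysctl meta-node that maps a numeric OID
         * to its dotted name (named CTL_SYSCTL / CTL_SYSCTL_NAME in later
         * FreeBSD versions); the OID being traced is appended after it.
         */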
        bcopy(name, mib + 2, namelen * sizeof(*name));
        mibnamelen = 128;
        mibname = malloc(mibnamelen, M_KTRACE, M_WAITOK);
        error = kernel_sysctl(curthread, mib, namelen + 2, mibname, &mibnamelen,
            NULL, 0, &mibnamelen, 0);
        if (error) {
                free(mibname, M_KTRACE);
                return;
        }
        req = ktr_getrequest(KTR_SYSCTL);
        if (req == NULL) {
                free(mibname, M_KTRACE);
                return;
        }
        req->ktr_header.ktr_len = mibnamelen;
        req->ktr_buffer = mibname;
        ktr_submitrequest(curthread, req);
}

void
ktrgenio(int fd, enum uio_rw rw, struct uio *uio, int error)
{
        struct ktr_request *req;
        struct ktr_genio *ktg;
        int datalen;
        char *buf;

        if (error) {
                free(uio, M_IOV);
                return;
        }
        uio->uio_offset = 0;
        uio->uio_rw = UIO_WRITE;
        datalen = MIN(uio->uio_resid, ktr_geniosize);
        buf = malloc(datalen, M_KTRACE, M_WAITOK);
        error = uiomove(buf, datalen, uio);
        free(uio, M_IOV);
        if (error) {
                free(buf, M_KTRACE);
                return;
        }
        req = ktr_getrequest(KTR_GENIO);
        if (req == NULL) {
                free(buf, M_KTRACE);
                return;
        }
        ktg = &req->ktr_data.ktr_genio;
        ktg->ktr_fd = fd;
        ktg->ktr_rw = rw;
        req->ktr_header.ktr_len = datalen;
        req->ktr_buffer = buf;
        ktr_submitrequest(curthread, req);
}

void
ktrpsig(int sig, sig_t action, sigset_t *mask, int code)
{
        struct thread *td = curthread;
        struct ktr_request *req;
        struct ktr_psig *kp;

        req = ktr_getrequest(KTR_PSIG);
        if (req == NULL)
                return;
        kp = &req->ktr_data.ktr_psig;
        kp->signo = (char)sig;
        kp->action = action;
        kp->mask = *mask;
        kp->code = code;
        ktr_enqueuerequest(td, req);
        ktrace_exit(td);
}

void
ktrcsw(int out, int user, const char *wmesg)
{
        struct thread *td = curthread;
        struct ktr_request *req;
        struct ktr_csw *kc;

        req = ktr_getrequest(KTR_CSW);
        if (req == NULL)
                return;
        kc = &req->ktr_data.ktr_csw;
        kc->out = out;
        kc->user = user;
        if (wmesg != NULL)
                strlcpy(kc->wmesg, wmesg, sizeof(kc->wmesg));
        else
                bzero(kc->wmesg, sizeof(kc->wmesg));
        ktr_enqueuerequest(td, req);
        ktrace_exit(td);
}

void
ktrstruct(const char *name, const void *data, size_t datalen)
{
        struct ktr_request *req;
        char *buf;
        size_t buflen, namelen;

        if (data == NULL)
                datalen = 0;
        namelen = strlen(name) + 1;
        buflen = namelen + datalen;
        buf = malloc(buflen, M_KTRACE, M_WAITOK);
        strcpy(buf, name);
        bcopy(data, buf + namelen, datalen);
        if ((req = ktr_getrequest(KTR_STRUCT)) == NULL) {
                free(buf, M_KTRACE);
                return;
        }
        req->ktr_buffer = buf;
        req->ktr_header.ktr_len = buflen;
        ktr_submitrequest(curthread, req);
}

void
ktrstructarray(const char *name, enum uio_seg seg, const void *data,
    int num_items, size_t struct_size)
{
        struct ktr_request *req;
        struct ktr_struct_array *ksa;
        char *buf;
        size_t buflen, datalen, namelen;
        int max_items;

        /* Trim array length to genio size. */
        max_items = ktr_geniosize / struct_size;
        if (num_items > max_items) {
                if (max_items == 0)
                        num_items = 1;
                else
                        num_items = max_items;
        }
        datalen = num_items * struct_size;

        if (data == NULL)
                datalen = 0;

        namelen = strlen(name) + 1;
        buflen = namelen + datalen;
        buf = malloc(buflen, M_KTRACE, M_WAITOK);
        strcpy(buf, name);
        if (seg == UIO_SYSSPACE)
                bcopy(data, buf + namelen, datalen);
        else {
                if (copyin(data, buf + namelen, datalen) != 0) {
                        free(buf, M_KTRACE);
                        return;
                }
        }
        if ((req = ktr_getrequest(KTR_STRUCT_ARRAY)) == NULL) {
                free(buf, M_KTRACE);
                return;
        }
        ksa = &req->ktr_data.ktr_struct_array;
        ksa->struct_size = struct_size;
        req->ktr_buffer = buf;
        req->ktr_header.ktr_len = buflen;
        ktr_submitrequest(curthread, req);
}

void
ktrcapfail(enum ktr_cap_fail_type type, const cap_rights_t *needed,
    const cap_rights_t *held)
{
        struct thread *td = curthread;
        struct ktr_request *req;
        struct ktr_cap_fail *kcf;

        req = ktr_getrequest(KTR_CAPFAIL);
        if (req == NULL)
                return;
        kcf = &req->ktr_data.ktr_cap_fail;
        kcf->cap_type = type;
        if (needed != NULL)
                kcf->cap_needed = *needed;
        else
                cap_rights_init(&kcf->cap_needed);
        if (held != NULL)
                kcf->cap_held = *held;
        else
                cap_rights_init(&kcf->cap_held);
        ktr_enqueuerequest(td, req);
        ktrace_exit(td);
}

void
ktrfault(vm_offset_t vaddr, int type)
{
        struct thread *td = curthread;
        struct ktr_request *req;
        struct ktr_fault *kf;

        req = ktr_getrequest(KTR_FAULT);
        if (req == NULL)
                return;
        kf = &req->ktr_data.ktr_fault;
        kf->vaddr = vaddr;
        kf->type = type;
        ktr_enqueuerequest(td, req);
        ktrace_exit(td);
}

void
ktrfaultend(int result)
{
        struct thread *td = curthread;
        struct ktr_request *req;
        struct ktr_faultend *kf;

        req = ktr_getrequest(KTR_FAULTEND);
        if (req == NULL)
                return;
        kf = &req->ktr_data.ktr_faultend;
        kf->result = result;
        ktr_enqueuerequest(td, req);
        ktrace_exit(td);
}
#endif /* KTRACE */

/* Interface and common routines */

#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
        char    *fname;
        int     ops;
        int     facs;
        int     pid;
};
#endif
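
/*
 * Example invocation (illustrative): trace system calls and their returns
 * for process 'pid' and all of its current descendants, writing records to
 * "ktrace.out":
 *
 *      ktrace("ktrace.out", KTROP_SET | KTRFLAG_DESCEND,
 *          KTRFAC_SYSCALL | KTRFAC_SYSRET, pid);
 *
 * A negative pid selects a process group, KTRFAC_INHERIT extends tracing to
 * future children, KTROP_CLEAR disables tracing, and KTROP_CLEARFILE
 * detaches every process still writing to the named file.
 */
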
/* ARGSUSED */
int
sys_ktrace(struct thread *td, struct ktrace_args *uap)
{
#ifdef KTRACE
        struct vnode *vp = NULL;
        struct proc *p;
        struct pgrp *pg;
        int facs = uap->facs & ~KTRFAC_ROOT;
        int ops = KTROP(uap->ops);
        int descend = uap->ops & KTRFLAG_DESCEND;
        int nfound, ret = 0;
        int flags, error = 0;
        struct nameidata nd;
        struct ucred *cred;

        /*
         * Need something to (un)trace.
         */
        if (ops != KTROP_CLEARFILE && facs == 0)
                return (EINVAL);

        ktrace_enter(td);
        if (ops != KTROP_CLEAR) {
                /*
                 * an operation which requires a file argument.
                 */
                NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, uap->fname, td);
                flags = FREAD | FWRITE | O_NOFOLLOW;
                error = vn_open(&nd, &flags, 0, NULL);
                if (error) {
                        ktrace_exit(td);
                        return (error);
                }
                NDFREE(&nd, NDF_ONLY_PNBUF);
                vp = nd.ni_vp;
                VOP_UNLOCK(vp, 0);
                if (vp->v_type != VREG) {
                        (void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
                        ktrace_exit(td);
                        return (EACCES);
                }
        }
        /*
         * Clear all uses of the tracefile.
         */
        if (ops == KTROP_CLEARFILE) {
                int vrele_count;

                vrele_count = 0;
                sx_slock(&allproc_lock);
                FOREACH_PROC_IN_SYSTEM(p) {
                        PROC_LOCK(p);
                        if (p->p_tracevp == vp) {
                                if (ktrcanset(td, p)) {
                                        mtx_lock(&ktrace_mtx);
                                        ktr_freeproc(p, &cred, NULL);
                                        mtx_unlock(&ktrace_mtx);
                                        vrele_count++;
                                        crfree(cred);
                                } else
                                        error = EPERM;
                        }
                        PROC_UNLOCK(p);
                }
                sx_sunlock(&allproc_lock);
                if (vrele_count > 0) {
                        while (vrele_count-- > 0)
                                vrele(vp);
                }
                goto done;
        }
        /*
         * do it
         */
        sx_slock(&proctree_lock);
        if (uap->pid < 0) {
                /*
                 * by process group
                 */
                pg = pgfind(-uap->pid);
                if (pg == NULL) {
                        sx_sunlock(&proctree_lock);
                        error = ESRCH;
                        goto done;
                }
                /*
                 * ktrops() may call vrele(). Lock pg_members
                 * by the proctree_lock rather than pg_mtx.
                 */
                PGRP_UNLOCK(pg);
                nfound = 0;
                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        PROC_LOCK(p);
                        if (p->p_state == PRS_NEW ||
                            p_cansee(td, p) != 0) {
                                PROC_UNLOCK(p);
                                continue;
                        }
                        nfound++;
                        if (descend)
                                ret |= ktrsetchildren(td, p, ops, facs, vp);
                        else
                                ret |= ktrops(td, p, ops, facs, vp);
                }
                if (nfound == 0) {
                        sx_sunlock(&proctree_lock);
                        error = ESRCH;
                        goto done;
                }
        } else {
                /*
                 * by pid
                 */
                p = pfind(uap->pid);
                if (p == NULL)
                        error = ESRCH;
                else
                        error = p_cansee(td, p);
                if (error) {
                        if (p != NULL)
                                PROC_UNLOCK(p);
                        sx_sunlock(&proctree_lock);
                        goto done;
                }
                if (descend)
                        ret |= ktrsetchildren(td, p, ops, facs, vp);
                else
                        ret |= ktrops(td, p, ops, facs, vp);
        }
        sx_sunlock(&proctree_lock);
        if (!ret)
                error = EPERM;
done:
        if (vp != NULL)
                (void) vn_close(vp, FWRITE, td->td_ucred, td);
        ktrace_exit(td);
        return (error);
#else /* !KTRACE */
        return (ENOSYS);
#endif /* KTRACE */
}

/* ARGSUSED */
int
sys_utrace(struct thread *td, struct utrace_args *uap)
{

#ifdef KTRACE
        struct ktr_request *req;
        void *cp;
        int error;

        if (!KTRPOINT(td, KTR_USER))
                return (0);
        if (uap->len > KTR_USER_MAXLEN)
                return (EINVAL);
        cp = malloc(uap->len, M_KTRACE, M_WAITOK);
        error = copyin(uap->addr, cp, uap->len);
        if (error) {
                free(cp, M_KTRACE);
                return (error);
        }
        req = ktr_getrequest(KTR_USER);
        if (req == NULL) {
                free(cp, M_KTRACE);
                return (ENOMEM);
        }
        req->ktr_buffer = cp;
        req->ktr_header.ktr_len = uap->len;
        ktr_submitrequest(td, req);
        return (0);
#else /* !KTRACE */
        return (ENOSYS);
#endif /* KTRACE */
}
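
/*
 * Example (illustrative): an application can inject its own markers into the
 * trace stream, e.g. to delimit phases of execution:
 *
 *      char tag[] = "phase: parse";
 *      utrace(tag, sizeof(tag));
 *
 * The payload is opaque to the kernel: it is copied in, limited to
 * KTR_USER_MAXLEN bytes, and written as a KTR_USER record.
 */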

#ifdef KTRACE
static int
ktrops(struct thread *td, struct proc *p, int ops, int facs, struct vnode *vp)
{
        struct vnode *tracevp = NULL;
        struct ucred *tracecred = NULL;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        if (!ktrcanset(td, p)) {
                PROC_UNLOCK(p);
                return (0);
        }
        if (p->p_flag & P_WEXIT) {
                /* If the process is exiting, just ignore it. */
                PROC_UNLOCK(p);
                return (1);
        }
        mtx_lock(&ktrace_mtx);
        if (ops == KTROP_SET) {
                if (p->p_tracevp != vp) {
                        /*
                         * if trace file already in use, relinquish below
                         */
                        tracevp = p->p_tracevp;
                        VREF(vp);
                        p->p_tracevp = vp;
                }
                if (p->p_tracecred != td->td_ucred) {
                        tracecred = p->p_tracecred;
                        p->p_tracecred = crhold(td->td_ucred);
                }
                p->p_traceflag |= facs;
                if (priv_check(td, PRIV_KTRACE) == 0)
                        p->p_traceflag |= KTRFAC_ROOT;
        } else {
                /* KTROP_CLEAR */
                if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0)
                        /* no more tracing */
                        ktr_freeproc(p, &tracecred, &tracevp);
        }
        mtx_unlock(&ktrace_mtx);
        if ((p->p_traceflag & KTRFAC_MASK) != 0)
                ktrprocctor_entered(td, p);
        PROC_UNLOCK(p);
        if (tracevp != NULL)
                vrele(tracevp);
        if (tracecred != NULL)
                crfree(tracecred);

        return (1);
}

static int
ktrsetchildren(struct thread *td, struct proc *top, int ops, int facs,
    struct vnode *vp)
{
        struct proc *p;
        int ret = 0;

        p = top;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        sx_assert(&proctree_lock, SX_LOCKED);
        for (;;) {
                ret |= ktrops(td, p, ops, facs, vp);
                /*
                 * If this process has children, descend to them next,
                 * otherwise do any siblings, and if done with this level,
                 * follow back up the tree (but not past top).
                 */
                if (!LIST_EMPTY(&p->p_children))
                        p = LIST_FIRST(&p->p_children);
                else for (;;) {
                        if (p == top)
                                return (ret);
                        if (LIST_NEXT(p, p_sibling)) {
                                p = LIST_NEXT(p, p_sibling);
                                break;
                        }
                        p = p->p_pptr;
                }
                PROC_LOCK(p);
        }
        /*NOTREACHED*/
}

static void
ktr_writerequest(struct thread *td, struct ktr_request *req)
{
        struct ktr_header *kth;
        struct vnode *vp;
        struct proc *p;
        struct ucred *cred;
        struct uio auio;
        struct iovec aiov[3];
        struct mount *mp;
        int datalen, buflen, vrele_count;
        int error;

        /*
         * We hold the vnode and credential for use in I/O in case ktrace is
         * disabled on the process as we write out the request.
         *
         * XXXRW: This is not ideal: we could end up performing a write after
         * the vnode has been closed.
         */
        mtx_lock(&ktrace_mtx);
        vp = td->td_proc->p_tracevp;
        cred = td->td_proc->p_tracecred;

        /*
         * If vp is NULL, the vp has been cleared out from under this
         * request, so just drop it.  Make sure the credential and vnode are
         * in sync: we should have both or neither.
         */
        if (vp == NULL) {
                KASSERT(cred == NULL, ("ktr_writerequest: cred != NULL"));
                mtx_unlock(&ktrace_mtx);
                return;
        }
        VREF(vp);
        KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));
        crhold(cred);
        mtx_unlock(&ktrace_mtx);

        kth = &req->ktr_header;
        KASSERT(((u_short)kth->ktr_type & ~KTR_DROP) < nitems(data_lengths),
            ("data_lengths array overflow"));
        datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
        buflen = kth->ktr_len;
        auio.uio_iov = &aiov[0];
        auio.uio_offset = 0;
        auio.uio_segflg = UIO_SYSSPACE;
        auio.uio_rw = UIO_WRITE;
        aiov[0].iov_base = (caddr_t)kth;
        aiov[0].iov_len = sizeof(struct ktr_header);
        auio.uio_resid = sizeof(struct ktr_header);
        auio.uio_iovcnt = 1;
        auio.uio_td = td;
        if (datalen != 0) {
                aiov[1].iov_base = (caddr_t)&req->ktr_data;
                aiov[1].iov_len = datalen;
                auio.uio_resid += datalen;
                auio.uio_iovcnt++;
                kth->ktr_len += datalen;
        }
        if (buflen != 0) {
                KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
                aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
                aiov[auio.uio_iovcnt].iov_len = buflen;
                auio.uio_resid += buflen;
                auio.uio_iovcnt++;
        }

        vn_start_write(vp, &mp, V_WAIT);
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef MAC
        error = mac_vnode_check_write(cred, NOCRED, vp);
        if (error == 0)
#endif
                error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
        VOP_UNLOCK(vp, 0);
        vn_finished_write(mp);
        crfree(cred);
        if (!error) {
                vrele(vp);
                return;
        }

        /*
         * If error encountered, give up tracing on this vnode.  We defer
         * all the vrele()'s on the vnode until after we are finished walking
         * the various lists to avoid needlessly holding locks.
         * NB: at this point we still hold the vnode reference that must
         * not go away as we need the valid vnode to compare with. Thus let
         * vrele_count start at 1 and the reference will be freed
         * by the loop at the end after our last use of vp.
         */
        log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
            error);
        vrele_count = 1;
        /*
         * First, clear this vnode from being used by any processes in the
         * system.
         * XXX - If one process gets an EPERM writing to the vnode, should
         * we really do this?  Other processes might have suitable
         * credentials for the operation.
         */
        cred = NULL;
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                PROC_LOCK(p);
                if (p->p_tracevp == vp) {
                        mtx_lock(&ktrace_mtx);
                        ktr_freeproc(p, &cred, NULL);
                        mtx_unlock(&ktrace_mtx);
                        vrele_count++;
                }
                PROC_UNLOCK(p);
                if (cred != NULL) {
                        crfree(cred);
                        cred = NULL;
                }
        }
        sx_sunlock(&allproc_lock);

        while (vrele_count-- > 0)
                vrele(vp);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(struct thread *td, struct proc *targetp)
{

        PROC_LOCK_ASSERT(targetp, MA_OWNED);
        if (targetp->p_traceflag & KTRFAC_ROOT &&
            priv_check(td, PRIV_KTRACE))
                return (0);

        if (p_candebug(td, targetp) != 0)
                return (0);

        return (1);
}

#endif /* KTRACE */