/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.3/sys/kern/kern_ktrace.c 173886 2007-11-24 19:45:58Z cvs2svn $");

#include "opt_ktrace.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/sx.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>

/*
 * The ktrace facility allows the tracing of certain key events in user space
 * processes, such as system calls, signal delivery, context switches, and
 * user-generated events using utrace(2).  It works by streaming event
 * records and data to a vnode associated with the process using the
 * ktrace(2) system call.  In general, records can be written directly from
 * the context that generates the event.  One important exception to this is
 * during a context switch, where sleeping is not permitted.  To handle this
 * case, trace events are generated using in-kernel ktr_request records, and
 * then delivered to disk at a convenient moment -- either immediately, the
 * next traceable event, at system call return, or at process exit.
 *
 * When dealing with multiple threads or processes writing to the same event
 * log, ordering guarantees are weak: specifically, if an event has multiple
 * records (i.e., system call enter and return), they may be interlaced with
 * records from another event.  Process and thread ID information is provided
 * in the record, and user applications can de-interlace events if required.
 */
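
/*
 * For orientation, a minimal userland sketch (not part of this kernel
 * file) of how the facility is driven: ktrace(2) attaches a trace file
 * to a process, after which the kernel streams records such as
 * KTR_SYSCALL and KTR_SYSRET to it; kdump(1) decodes the result.
 * Error handling is elided and the trace file name is arbitrary.
 *
 *	#include <sys/param.h>
 *	#include <sys/time.h>
 *	#include <sys/uio.h>
 *	#include <sys/ktrace.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		pid_t pid;
 *
 *		if ((pid = fork()) == 0) {
 *			// Child: enable syscall tracing on ourselves,
 *			// then exec; records stream to "ktrace.out".
 *			ktrace("ktrace.out", KTROP_SET,
 *			    KTRFAC_SYSCALL | KTRFAC_SYSRET, getpid());
 *			execlp("ls", "ls", (char *)NULL);
 *			_exit(1);
 *		}
 *		waitpid(pid, NULL, 0);
 *		return (0);
 *	}
 */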

static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE

#ifndef	KTRACE_REQUEST_POOL
#define	KTRACE_REQUEST_POOL	100
#endif

struct ktr_request {
	struct ktr_header ktr_header;
	void *ktr_buffer;
	union {
		struct ktr_syscall ktr_syscall;
		struct ktr_sysret ktr_sysret;
		struct ktr_genio ktr_genio;
		struct ktr_psig ktr_psig;
		struct ktr_csw ktr_csw;
	} ktr_data;
	STAILQ_ENTRY(ktr_request) ktr_list;
};

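/*
 * Fixed-length payload (within ktr_data above) carried by each record
 * type, indexed by the KTR_* type code; ktr_writerequest() consults this
 * to size the second iovec of a record.  A zero entry means the type
 * carries no fixed-length payload, only the optional variable-length
 * buffer.
 */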
static int data_lengths[] = {
	0,					/* none */
	offsetof(struct ktr_syscall, ktr_args),	/* KTR_SYSCALL */
	sizeof(struct ktr_sysret),		/* KTR_SYSRET */
	0,					/* KTR_NAMEI */
	sizeof(struct ktr_genio),		/* KTR_GENIO */
	sizeof(struct ktr_psig),		/* KTR_PSIG */
	sizeof(struct ktr_csw),			/* KTR_CSW */
	0					/* KTR_USER */
};

static STAILQ_HEAD(, ktr_request) ktr_free;

static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options");

static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);

static u_int ktr_geniosize = PAGE_SIZE;
TUNABLE_INT("kern.ktrace.genio_size", &ktr_geniosize);
SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RW, &ktr_geniosize,
    0, "Maximum size of genio event payload");

static int print_message = 1;
struct mtx ktrace_mtx;
static struct cv ktrace_cv;
static struct sx ktrace_sx;

static void ktrace_init(void *dummy);
static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
static u_int ktrace_resize_pool(u_int newsize);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
static void ktr_freerequest(struct ktr_request *req);
static void ktr_writerequest(struct thread *td, struct ktr_request *req);
static int ktrcanset(struct thread *, struct proc *);
static int ktrsetchildren(struct thread *, struct proc *, int, int,
    struct vnode *);
static int ktrops(struct thread *, struct proc *, int, int, struct vnode *);

/*
 * ktrace itself generates events, such as context switches, which we do not
 * wish to trace.  Maintain a flag, TDP_INKTRACE, on each thread to determine
 * whether or not it is in a region where tracing of events should be
 * suppressed.
 */
static void
ktrace_enter(struct thread *td)
{

	KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set"));
	td->td_pflags |= TDP_INKTRACE;
}

static void
ktrace_exit(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set"));
	td->td_pflags &= ~TDP_INKTRACE;
}

static void
ktrace_assert(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
}

static void
ktrace_init(void *dummy)
{
	struct ktr_request *req;
	int i;

	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
	sx_init(&ktrace_sx, "ktrace_sx");
	cv_init(&ktrace_cv, "ktrace");
	STAILQ_INIT(&ktr_free);
	for (i = 0; i < ktr_requestpool; i++) {
		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK);
		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	}
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);

static int
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
{
	struct thread *td;
	u_int newsize, oldsize, wantsize;
	int error;

	/* Handle easy read-only case first to avoid warnings from GCC. */
	if (!req->newptr) {
		mtx_lock(&ktrace_mtx);
		oldsize = ktr_requestpool;
		mtx_unlock(&ktrace_mtx);
		return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
	}

	error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
	if (error)
		return (error);
	td = curthread;
	ktrace_enter(td);
	mtx_lock(&ktrace_mtx);
	oldsize = ktr_requestpool;
	newsize = ktrace_resize_pool(wantsize);
	mtx_unlock(&ktrace_mtx);
	ktrace_exit(td);
	error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
	if (error)
		return (error);
	if (wantsize > oldsize && newsize < wantsize)
		return (ENOSPC);
	return (0);
}
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool, CTLTYPE_UINT|CTLFLAG_RW,
    &ktr_requestpool, 0, sysctl_kern_ktrace_request_pool, "IU", "");
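
/*
 * Usage sketch: the pool may be sized at boot via loader.conf(5) using
 * the tunable above, or adjusted at runtime, e.g.:
 *
 *	kern.ktrace.request_pool=200		(/boot/loader.conf)
 *	sysctl kern.ktrace.request_pool=200	(runtime)
 *
 * A grow that cannot be fully satisfied is reported as ENOSPC by the
 * handler above; a shrink stops early once the free list is exhausted,
 * without error.
 */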

static u_int
ktrace_resize_pool(u_int newsize)
{
	struct ktr_request *req;
	int bound;

	mtx_assert(&ktrace_mtx, MA_OWNED);
	print_message = 1;
	bound = newsize - ktr_requestpool;
	if (bound == 0)
		return (ktr_requestpool);
	if (bound < 0)
		/* Shrink pool down to newsize if possible. */
		while (bound++ < 0) {
			req = STAILQ_FIRST(&ktr_free);
			if (req == NULL)
				return (ktr_requestpool);
			STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
			ktr_requestpool--;
			mtx_unlock(&ktrace_mtx);
			free(req, M_KTRACE);
			mtx_lock(&ktrace_mtx);
		}
	else
		/* Grow pool up to newsize. */
		while (bound-- > 0) {
			mtx_unlock(&ktrace_mtx);
			req = malloc(sizeof(struct ktr_request), M_KTRACE,
			    M_WAITOK);
			mtx_lock(&ktrace_mtx);
			STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
			ktr_requestpool++;
		}
	return (ktr_requestpool);
}

static struct ktr_request *
ktr_getrequest(int type)
{
	struct ktr_request *req;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int pm;

	ktrace_enter(td);	/* XXX: In caller instead? */
	mtx_lock(&ktrace_mtx);
	if (!KTRCHECK(td, type)) {
		mtx_unlock(&ktrace_mtx);
		ktrace_exit(td);
		return (NULL);
	}
	req = STAILQ_FIRST(&ktr_free);
	if (req != NULL) {
		STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
		req->ktr_header.ktr_type = type;
		if (p->p_traceflag & KTRFAC_DROP) {
			req->ktr_header.ktr_type |= KTR_DROP;
			p->p_traceflag &= ~KTRFAC_DROP;
		}
		mtx_unlock(&ktrace_mtx);
		microtime(&req->ktr_header.ktr_time);
		req->ktr_header.ktr_pid = p->p_pid;
		req->ktr_header.ktr_tid = td->td_tid;
		bcopy(p->p_comm, req->ktr_header.ktr_comm, MAXCOMLEN + 1);
		req->ktr_buffer = NULL;
		req->ktr_header.ktr_len = 0;
	} else {
		p->p_traceflag |= KTRFAC_DROP;
		pm = print_message;
		print_message = 0;
		mtx_unlock(&ktrace_mtx);
		if (pm)
			printf("Out of ktrace request objects.\n");
		ktrace_exit(td);
	}
	return (req);
}

/*
 * Some trace generation environments don't permit direct access to VFS,
 * such as during a context switch where sleeping is not allowed.  Under these
 * circumstances, queue a request to the thread to be written asynchronously
 * later.
 */
static void
ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
	ktrace_exit(td);
}

/*
 * Drain any pending ktrace records from the per-thread queue to disk.  This
 * is used both internally before committing other records, and also on
 * system call return.  We drain all the ones we can find at the time when
 * drain is requested, but don't keep draining after that as those events
 * may be approximately "after" the current event.
 */
static void
ktr_drain(struct thread *td)
{
	struct ktr_request *queued_req;
	STAILQ_HEAD(, ktr_request) local_queue;

	ktrace_assert(td);
	sx_assert(&ktrace_sx, SX_XLOCKED);

	STAILQ_INIT(&local_queue);	/* XXXRW: needed? */

	if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
		mtx_unlock(&ktrace_mtx);

		while ((queued_req = STAILQ_FIRST(&local_queue))) {
			STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
			ktr_writerequest(td, queued_req);
			ktr_freerequest(queued_req);
		}
	}
}

/*
 * Submit a trace record for immediate commit to disk -- to be used only
 * where entering VFS is OK.  First drain any pending records that may have
 * been cached in the thread.
 */
static void
ktr_submitrequest(struct thread *td, struct ktr_request *req)
{

	ktrace_assert(td);

	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	ktr_writerequest(td, req);
	ktr_freerequest(req);
	sx_xunlock(&ktrace_sx);

	ktrace_exit(td);
}

static void
ktr_freerequest(struct ktr_request *req)
{

	if (req->ktr_buffer != NULL)
		free(req->ktr_buffer, M_KTRACE);
	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
}

/*
 * MPSAFE
 */
void
ktrsyscall(int code, int narg, register_t args[])
{
	struct ktr_request *req;
	struct ktr_syscall *ktp;
	size_t buflen;
	char *buf = NULL;

	buflen = sizeof(register_t) * narg;
	if (buflen > 0) {
		buf = malloc(buflen, M_KTRACE, M_WAITOK);
		bcopy(args, buf, buflen);
	}
	req = ktr_getrequest(KTR_SYSCALL);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	ktp = &req->ktr_data.ktr_syscall;
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	if (buflen > 0) {
		req->ktr_header.ktr_len = buflen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

/*
 * MPSAFE
 */
void
ktrsysret(int code, int error, register_t retval)
{
	struct ktr_request *req;
	struct ktr_sysret *ktp;

	req = ktr_getrequest(KTR_SYSRET);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_sysret;
	ktp->ktr_code = code;
	ktp->ktr_error = error;
	ktp->ktr_retval = retval;	/* what about val2 ? */
	ktr_submitrequest(curthread, req);
}

/*
 * When a process exits, drain per-process asynchronous trace records.
 */
void
ktrprocexit(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

/*
 * When a thread returns, drain any asynchronous records generated by the
 * system call.
 */
void
ktruserret(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

void
ktrnamei(char *path)
{
	struct ktr_request *req;
	int namelen;
	char *buf = NULL;

	namelen = strlen(path);
	if (namelen > 0) {
		buf = malloc(namelen, M_KTRACE, M_WAITOK);
		bcopy(path, buf, namelen);
	}
	req = ktr_getrequest(KTR_NAMEI);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	if (namelen > 0) {
		req->ktr_header.ktr_len = namelen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

void
ktrgenio(int fd, enum uio_rw rw, struct uio *uio, int error)
{
	struct ktr_request *req;
	struct ktr_genio *ktg;
	int datalen;
	char *buf;

	if (error) {
		free(uio, M_IOV);
		return;
	}
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;
	datalen = imin(uio->uio_resid, ktr_geniosize);
	buf = malloc(datalen, M_KTRACE, M_WAITOK);
	error = uiomove(buf, datalen, uio);
	free(uio, M_IOV);
	if (error) {
		free(buf, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_GENIO);
	if (req == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ktg = &req->ktr_data.ktr_genio;
	ktg->ktr_fd = fd;
	ktg->ktr_rw = rw;
	req->ktr_header.ktr_len = datalen;
	req->ktr_buffer = buf;
	ktr_submitrequest(curthread, req);
}

void
ktrpsig(int sig, sig_t action, sigset_t *mask, int code)
{
	struct ktr_request *req;
	struct ktr_psig *kp;

	req = ktr_getrequest(KTR_PSIG);
	if (req == NULL)
		return;
	kp = &req->ktr_data.ktr_psig;
	kp->signo = (char)sig;
	kp->action = action;
	kp->mask = *mask;
	kp->code = code;
	ktr_enqueuerequest(curthread, req);
}

void
ktrcsw(int out, int user)
{
	struct ktr_request *req;
	struct ktr_csw *kc;

	req = ktr_getrequest(KTR_CSW);
	if (req == NULL)
		return;
	kc = &req->ktr_data.ktr_csw;
	kc->out = out;
	kc->user = user;
	ktr_enqueuerequest(curthread, req);
}
#endif /* KTRACE */

/* Interface and common routines */

/*
 * ktrace system call
 *
 * MPSAFE
 */
#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
#endif
/* ARGSUSED */
int
ktrace(struct thread *td, struct ktrace_args *uap)
{
#ifdef KTRACE
	struct vnode *vp = NULL;
	struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int nfound, ret = 0;
	int flags, error = 0;
	struct nameidata nd;
	struct ucred *cred;

	/*
	 * Need something to (un)trace.
	 */
	if (ops != KTROP_CLEARFILE && facs == 0)
		return (EINVAL);

	ktrace_enter(td);
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, uap->fname, td);
		flags = FREAD | FWRITE | O_NOFOLLOW;
		mtx_lock(&Giant);
		error = vn_open(&nd, &flags, 0, -1);
		if (error) {
			mtx_unlock(&Giant);
			ktrace_exit(td);
			return (error);
		}
		NDFREE(&nd, NDF_ONLY_PNBUF);
		vp = nd.ni_vp;
		VOP_UNLOCK(vp, 0, td);
		if (vp->v_type != VREG) {
			(void)vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
			mtx_unlock(&Giant);
			ktrace_exit(td);
			return (EACCES);
		}
		mtx_unlock(&Giant);
	}
	/*
	 * Clear all uses of the tracefile.
	 */
	if (ops == KTROP_CLEARFILE) {
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			PROC_LOCK(p);
			if (p->p_tracevp == vp) {
				if (ktrcanset(td, p)) {
					mtx_lock(&ktrace_mtx);
					cred = p->p_tracecred;
					p->p_tracecred = NULL;
					p->p_tracevp = NULL;
					p->p_traceflag = 0;
					mtx_unlock(&ktrace_mtx);
					PROC_UNLOCK(p);
					mtx_lock(&Giant);
					(void)vn_close(vp, FREAD|FWRITE,
					    cred, td);
					mtx_unlock(&Giant);
					crfree(cred);
				} else {
					PROC_UNLOCK(p);
					error = EPERM;
				}
			} else
				PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		goto done;
	}
	/*
	 * do it
	 */
	sx_slock(&proctree_lock);
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		/*
		 * ktrops() may call vrele().  Lock pg_members
		 * by the proctree_lock rather than pg_mtx.
		 */
		PGRP_UNLOCK(pg);
		nfound = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p_cansee(td, p) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			PROC_UNLOCK(p);
			nfound++;
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, vp);
			else
				ret |= ktrops(td, p, ops, facs, vp);
		}
		if (nfound == 0) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		error = p_cansee(td, p);
		/*
		 * The slock of the proctree lock will keep this process
		 * from going away, so unlocking the proc here is ok.
		 */
		PROC_UNLOCK(p);
		if (error) {
			sx_sunlock(&proctree_lock);
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, vp);
		else
			ret |= ktrops(td, p, ops, facs, vp);
	}
	sx_sunlock(&proctree_lock);
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL) {
		mtx_lock(&Giant);
		(void)vn_close(vp, FWRITE, td->td_ucred, td);
		mtx_unlock(&Giant);
	}
	ktrace_exit(td);
	return (error);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

/*
 * utrace system call
 *
 * MPSAFE
 */
/* ARGSUSED */
int
utrace(struct thread *td, struct utrace_args *uap)
{

#ifdef KTRACE
	struct ktr_request *req;
	void *cp;
	int error;

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	cp = malloc(uap->len, M_KTRACE, M_WAITOK);
	error = copyin(uap->addr, cp, uap->len);
	if (error) {
		free(cp, M_KTRACE);
		return (error);
	}
	req = ktr_getrequest(KTR_USER);
	if (req == NULL) {
		free(cp, M_KTRACE);
		return (ENOMEM);
	}
	req->ktr_buffer = cp;
	req->ktr_header.ktr_len = uap->len;
	ktr_submitrequest(td, req);
	return (0);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}
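
/*
 * Userland sketch (not part of this kernel file): an application can
 * inject its own KTR_USER records via utrace(2); payloads longer than
 * KTR_USER_MAXLEN are rejected with EINVAL, as enforced above.  The
 * record layout ("struct my_mark") is hypothetical -- kdump(1) simply
 * displays the bytes.
 *
 *	#include <sys/param.h>
 *	#include <sys/time.h>
 *	#include <sys/uio.h>
 *	#include <sys/ktrace.h>
 *
 *	struct my_mark {
 *		int	phase;
 *		int	value;
 *	};
 *
 *	void
 *	mark(int phase, int value)
 *	{
 *		struct my_mark m = { phase, value };
 *
 *		(void)utrace(&m, sizeof(m));
 *	}
 */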

#ifdef KTRACE
static int
ktrops(struct thread *td, struct proc *p, int ops, int facs, struct vnode *vp)
{
	struct vnode *tracevp = NULL;
	struct ucred *tracecred = NULL;

	PROC_LOCK(p);
	if (!ktrcanset(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_lock(&ktrace_mtx);
	if (ops == KTROP_SET) {
		if (p->p_tracevp != vp) {
			/*
			 * if trace file already in use, relinquish below
			 */
			tracevp = p->p_tracevp;
			VREF(vp);
			p->p_tracevp = vp;
		}
		if (p->p_tracecred != td->td_ucred) {
			tracecred = p->p_tracecred;
			p->p_tracecred = crhold(td->td_ucred);
		}
		p->p_traceflag |= facs;
		if (suser_cred(td->td_ucred, SUSER_ALLOWJAIL) == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			tracevp = p->p_tracevp;
			p->p_tracevp = NULL;
			tracecred = p->p_tracecred;
			p->p_tracecred = NULL;
		}
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	if (tracevp != NULL) {
		int vfslocked;

		vfslocked = VFS_LOCK_GIANT(tracevp->v_mount);
		vrele(tracevp);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (tracecred != NULL)
		crfree(tracecred);

	return (1);
}

static int
ktrsetchildren(struct thread *td, struct proc *top, int ops, int facs,
    struct vnode *vp)
{
	struct proc *p;
	int ret = 0;

	p = top;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

static void
ktr_writerequest(struct thread *td, struct ktr_request *req)
{
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	int datalen, buflen, vrele_count;
	int error;

	/*
	 * We hold the vnode and credential for use in I/O in case ktrace is
	 * disabled on the process as we write out the request.
	 *
	 * XXXRW: This is not ideal: we could end up performing a write after
	 * the vnode has been closed.
	 */
	mtx_lock(&ktrace_mtx);
	vp = td->td_proc->p_tracevp;
	if (vp != NULL)
		VREF(vp);
	cred = td->td_proc->p_tracecred;
	if (cred != NULL)
		crhold(cred);
	mtx_unlock(&ktrace_mtx);

	/*
	 * If vp is NULL, the vp has been cleared out from under this
	 * request, so just drop it.  Make sure the credential and vnode are
	 * in sync: we should have both or neither.
	 */
	if (vp == NULL) {
		KASSERT(cred == NULL, ("ktr_writerequest: cred != NULL"));
		return;
	}
	KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));

	kth = &req->ktr_header;
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
	buflen = kth->ktr_len;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}

	mtx_lock(&Giant);
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	(void)VOP_LEASE(vp, td, cred, LEASE_WRITE);
#ifdef MAC
	error = mac_check_vnode_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	vrele(vp);
	mtx_unlock(&Giant);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.  We defer
	 * all the vrele()'s on the vnode until after we are finished walking
	 * the various lists to avoid needlessly holding locks.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	vrele_count = 0;
	/*
	 * First, clear this vnode from being used by any processes in the
	 * system.
	 * XXX - If one process gets an EPERM writing to the vnode, should
	 * we really do this?  Other processes might have suitable
	 * credentials for the operation.
	 */
	cred = NULL;
	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		PROC_LOCK(p);
		if (p->p_tracevp == vp) {
			mtx_lock(&ktrace_mtx);
			p->p_tracevp = NULL;
			p->p_traceflag = 0;
			cred = p->p_tracecred;
			p->p_tracecred = NULL;
			mtx_unlock(&ktrace_mtx);
			vrele_count++;
		}
		PROC_UNLOCK(p);
		if (cred != NULL) {
			crfree(cred);
			cred = NULL;
		}
	}
	sx_sunlock(&allproc_lock);

	/*
	 * We can't clear any pending requests in threads that have cached
	 * them but not yet committed them, as those are per-thread.  The
	 * thread will have to clear it itself on system call return.
	 */
	mtx_lock(&Giant);
	while (vrele_count-- > 0)
		vrele(vp);
	mtx_unlock(&Giant);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(struct thread *td, struct proc *targetp)
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if (targetp->p_traceflag & KTRFAC_ROOT &&
	    suser_cred(td->td_ucred, SUSER_ALLOWJAIL))
		return (0);

	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}

#endif /* KTRACE */