/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.2/sys/kern/kern_ktrace.c 164286 2006-11-14 20:42:41Z cvs2svn $");

#include "opt_ktrace.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>

/*
 * The ktrace facility allows the tracing of certain key events in user space
 * processes, such as system calls, signal delivery, context switches, and
 * user-generated events using utrace(2).  It works by streaming event
 * records and data to a vnode associated with the process using the
 * ktrace(2) system call.  In general, records can be written directly from
 * the context that generates the event.  One important exception to this is
 * during a context switch, where sleeping is not permitted.  To handle this
 * case, trace events are generated using in-kernel ktr_request records, and
 * then delivered to disk at a convenient moment -- either immediately, at
 * the next traceable event, at system call return, or at process exit.
 *
 * When dealing with multiple threads or processes writing to the same event
 * log, ordering guarantees are weak: specifically, if an event has multiple
 * records (i.e., system call enter and return), they may be interlaced with
 * records from another event.  Process and thread ID information is provided
 * in the record, and user applications can de-interlace events if required.
 */
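
/*
 * Illustrative userland usage (not part of this file): tracing is
 * normally enabled through the ktrace(2) system call implemented below,
 * e.g.
 *
 *	ktrace("ktrace.out", KTROP_SET, KTRFAC_SYSCALL | KTRFAC_SYSRET,
 *	    getpid());
 *
 * which requests system call enter/return records for the calling
 * process, appended to the file "ktrace.out".
 */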

static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE

#ifndef KTRACE_REQUEST_POOL
#define	KTRACE_REQUEST_POOL	100
#endif

struct ktr_request {
	struct	ktr_header ktr_header;
	void	*ktr_buffer;
	union {
		struct	ktr_syscall ktr_syscall;
		struct	ktr_sysret ktr_sysret;
		struct	ktr_genio ktr_genio;
		struct	ktr_psig ktr_psig;
		struct	ktr_csw ktr_csw;
	} ktr_data;
	STAILQ_ENTRY(ktr_request) ktr_list;
};

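/*
 * Fixed-size payload carried in ktr_data for each record type, indexed
 * by the KTR_* type code.  A zero entry means the record has no
 * fixed-size payload (it may still carry a variable-length buffer).
 */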
static int data_lengths[] = {
	0,					/* none */
	offsetof(struct ktr_syscall, ktr_args),	/* KTR_SYSCALL */
	sizeof(struct ktr_sysret),		/* KTR_SYSRET */
	0,					/* KTR_NAMEI */
	sizeof(struct ktr_genio),		/* KTR_GENIO */
	sizeof(struct ktr_psig),		/* KTR_PSIG */
	sizeof(struct ktr_csw),			/* KTR_CSW */
	0					/* KTR_USER */
};

static STAILQ_HEAD(, ktr_request) ktr_free;

static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options");

static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);

static u_int ktr_geniosize = PAGE_SIZE;
TUNABLE_INT("kern.ktrace.genio_size", &ktr_geniosize);
SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RW, &ktr_geniosize,
    0, "Maximum size of genio event payload");

static int print_message = 1;
struct mtx ktrace_mtx;
static struct cv ktrace_cv;
static struct sx ktrace_sx;

static void ktrace_init(void *dummy);
static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
static u_int ktrace_resize_pool(u_int newsize);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
static void ktr_freerequest(struct ktr_request *req);
static void ktr_writerequest(struct thread *td, struct ktr_request *req);
static int ktrcanset(struct thread *, struct proc *);
static int ktrsetchildren(struct thread *, struct proc *, int, int,
    struct vnode *);
static int ktrops(struct thread *, struct proc *, int, int, struct vnode *);

/*
 * ktrace itself generates events, such as context switches, which we do not
 * wish to trace.  Maintain a flag, TDP_INKTRACE, on each thread to determine
 * whether or not it is in a region where tracing of events should be
 * suppressed.
 */
static void
ktrace_enter(struct thread *td)
{

	KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set"));
	td->td_pflags |= TDP_INKTRACE;
}

static void
ktrace_exit(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set"));
	td->td_pflags &= ~TDP_INKTRACE;
}

static void
ktrace_assert(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
}

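/*
 * Preallocate the request pool and initialize the locks that protect
 * the free list and per-process trace state.
 */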
static void
ktrace_init(void *dummy)
{
	struct ktr_request *req;
	int i;

	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
	sx_init(&ktrace_sx, "ktrace_sx");
	cv_init(&ktrace_cv, "ktrace");
	STAILQ_INIT(&ktr_free);
	for (i = 0; i < ktr_requestpool; i++) {
		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK);
		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	}
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);

static int
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
{
	struct thread *td;
	u_int newsize, oldsize, wantsize;
	int error;

	/* Handle easy read-only case first to avoid warnings from GCC. */
	if (!req->newptr) {
		mtx_lock(&ktrace_mtx);
		oldsize = ktr_requestpool;
		mtx_unlock(&ktrace_mtx);
		return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
	}

	error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
	if (error)
		return (error);
	td = curthread;
	ktrace_enter(td);
	mtx_lock(&ktrace_mtx);
	oldsize = ktr_requestpool;
	newsize = ktrace_resize_pool(wantsize);
	mtx_unlock(&ktrace_mtx);
	ktrace_exit(td);
	error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
	if (error)
		return (error);
	if (wantsize > oldsize && newsize < wantsize)
		return (ENOSPC);
	return (0);
}
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool, CTLTYPE_UINT|CTLFLAG_RW,
    &ktr_requestpool, 0, sysctl_kern_ktrace_request_pool, "IU", "");

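/*
 * Illustrative usage (not part of this file): the pool can be resized
 * at runtime from userland, e.g.
 *
 *	sysctl kern.ktrace.request_pool=200
 *
 * which is serviced by the handler above and may return ENOSPC if the
 * pool could not be adjusted to the requested size.
 */

/*
 * Resize the request pool toward newsize, dropping ktrace_mtx around
 * each malloc() and free().  Returns the resulting pool size, which may
 * fall short of the request when shrinking if free entries run out.
 */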
static u_int
ktrace_resize_pool(u_int newsize)
{
	struct ktr_request *req;
	int bound;

	mtx_assert(&ktrace_mtx, MA_OWNED);
	print_message = 1;
	bound = newsize - ktr_requestpool;
	if (bound == 0)
		return (ktr_requestpool);
	if (bound < 0)
		/* Shrink pool down to newsize if possible. */
		while (bound++ < 0) {
			req = STAILQ_FIRST(&ktr_free);
			if (req == NULL)
				return (ktr_requestpool);
			STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
			ktr_requestpool--;
			mtx_unlock(&ktrace_mtx);
			free(req, M_KTRACE);
			mtx_lock(&ktrace_mtx);
		}
	else
		/* Grow pool up to newsize. */
		while (bound-- > 0) {
			mtx_unlock(&ktrace_mtx);
			req = malloc(sizeof(struct ktr_request), M_KTRACE,
			    M_WAITOK);
			mtx_lock(&ktrace_mtx);
			STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
			ktr_requestpool++;
		}
	return (ktr_requestpool);
}

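/*
 * Allocate a request from the free pool and pre-fill its header for the
 * current thread.  Returns NULL if tracing is disabled for this event
 * type or the pool is exhausted; in the latter case KTRFAC_DROP is
 * latched on the process so the next successful record is flagged with
 * KTR_DROP.
 */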
static struct ktr_request *
ktr_getrequest(int type)
{
	struct ktr_request *req;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int pm;

	ktrace_enter(td);	/* XXX: In caller instead? */
	mtx_lock(&ktrace_mtx);
	if (!KTRCHECK(td, type)) {
		mtx_unlock(&ktrace_mtx);
		ktrace_exit(td);
		return (NULL);
	}
	req = STAILQ_FIRST(&ktr_free);
	if (req != NULL) {
		STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
		req->ktr_header.ktr_type = type;
		if (p->p_traceflag & KTRFAC_DROP) {
			req->ktr_header.ktr_type |= KTR_DROP;
			p->p_traceflag &= ~KTRFAC_DROP;
		}
		mtx_unlock(&ktrace_mtx);
		microtime(&req->ktr_header.ktr_time);
		req->ktr_header.ktr_pid = p->p_pid;
		req->ktr_header.ktr_tid = td->td_tid;
		bcopy(p->p_comm, req->ktr_header.ktr_comm, MAXCOMLEN + 1);
		req->ktr_buffer = NULL;
		req->ktr_header.ktr_len = 0;
	} else {
		p->p_traceflag |= KTRFAC_DROP;
		pm = print_message;
		print_message = 0;
		mtx_unlock(&ktrace_mtx);
		if (pm)
			printf("Out of ktrace request objects.\n");
		ktrace_exit(td);
	}
	return (req);
}

/*
 * Some trace generation environments don't permit direct access to VFS,
 * such as during a context switch where sleeping is not allowed.  Under
 * these circumstances, queue the request on the process (p_ktr) so that
 * it can be written asynchronously later.
 */
static void
ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
	ktrace_exit(td);
}

/*
 * Drain any pending ktrace records from the per-process queue to disk.
 * This is used both internally before committing other records, and also on
 * system call return.  We drain all the ones we can find at the time when
 * drain is requested, but don't keep draining after that as those events
 * may be approximately "after" the current event.
 */
static void
ktr_drain(struct thread *td)
{
	struct ktr_request *queued_req;
	STAILQ_HEAD(, ktr_request) local_queue;

	ktrace_assert(td);
	sx_assert(&ktrace_sx, SX_XLOCKED);

	STAILQ_INIT(&local_queue);	/* XXXRW: needed? */

	if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
		mtx_unlock(&ktrace_mtx);

		while ((queued_req = STAILQ_FIRST(&local_queue))) {
			STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
			ktr_writerequest(td, queued_req);
			ktr_freerequest(queued_req);
		}
	}
}

/*
 * Submit a trace record for immediate commit to disk -- to be used only
 * where entering VFS is OK.  First drain any pending records that may have
 * been cached in the thread.
 */
static void
ktr_submitrequest(struct thread *td, struct ktr_request *req)
{

	ktrace_assert(td);

	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	ktr_writerequest(td, req);
	ktr_freerequest(req);
	sx_xunlock(&ktrace_sx);

	ktrace_exit(td);
}

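/*
 * Return a request to the free pool, releasing any variable-length
 * buffer attached to it.
 */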
static void
ktr_freerequest(struct ktr_request *req)
{

	if (req->ktr_buffer != NULL)
		free(req->ktr_buffer, M_KTRACE);
	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
}

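/*
 * Record a system call entry: the call number plus a copy of its
 * arguments, committed synchronously.
 */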
/*
 * MPSAFE
 */
void
ktrsyscall(int code, int narg, register_t args[])
{
	struct ktr_request *req;
	struct ktr_syscall *ktp;
	size_t buflen;
	char *buf = NULL;

	buflen = sizeof(register_t) * narg;
	if (buflen > 0) {
		buf = malloc(buflen, M_KTRACE, M_WAITOK);
		bcopy(args, buf, buflen);
	}
	req = ktr_getrequest(KTR_SYSCALL);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	ktp = &req->ktr_data.ktr_syscall;
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	if (buflen > 0) {
		req->ktr_header.ktr_len = buflen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

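/*
 * Record a system call return: the call number, error status, and
 * primary return value.
 */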
/*
 * MPSAFE
 */
void
ktrsysret(int code, int error, register_t retval)
{
	struct ktr_request *req;
	struct ktr_sysret *ktp;

	req = ktr_getrequest(KTR_SYSRET);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_sysret;
	ktp->ktr_code = code;
	ktp->ktr_error = error;
	ktp->ktr_retval = retval;		/* what about val2 ? */
	ktr_submitrequest(curthread, req);
}

/*
 * When a process exits, drain per-process asynchronous trace records.
 */
void
ktrprocexit(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

/*
 * When a thread returns to user space, drain any asynchronous records
 * generated by its system call.
 */
void
ktruserret(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

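/*
 * Record a pathname looked up via namei(); the path is carried in the
 * variable-length buffer rather than in ktr_data.
 */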
void
ktrnamei(char *path)
{
	struct ktr_request *req;
	int namelen;
	char *buf = NULL;

	namelen = strlen(path);
	if (namelen > 0) {
		buf = malloc(namelen, M_KTRACE, M_WAITOK);
		bcopy(path, buf, namelen);
	}
	req = ktr_getrequest(KTR_NAMEI);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	if (namelen > 0) {
		req->ktr_header.ktr_len = namelen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

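/*
 * Record generic I/O: copy up to ktr_geniosize bytes of the transferred
 * data out of the caller-supplied, heap-allocated uio, which is
 * consumed and freed here in all cases.
 */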
void
ktrgenio(int fd, enum uio_rw rw, struct uio *uio, int error)
{
	struct ktr_request *req;
	struct ktr_genio *ktg;
	int datalen;
	char *buf;

	if (error) {
		free(uio, M_IOV);
		return;
	}
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;
	datalen = imin(uio->uio_resid, ktr_geniosize);
	buf = malloc(datalen, M_KTRACE, M_WAITOK);
	error = uiomove(buf, datalen, uio);
	free(uio, M_IOV);
	if (error) {
		free(buf, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_GENIO);
	if (req == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ktg = &req->ktr_data.ktr_genio;
	ktg->ktr_fd = fd;
	ktg->ktr_rw = rw;
	req->ktr_header.ktr_len = datalen;
	req->ktr_buffer = buf;
	ktr_submitrequest(curthread, req);
}

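/*
 * Record signal delivery: signal number, handler, blocked mask, and
 * code.  The record is queued for asynchronous write-out rather than
 * committed immediately.
 */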
void
ktrpsig(int sig, sig_t action, sigset_t *mask, int code)
{
	struct ktr_request *req;
	struct ktr_psig *kp;

	req = ktr_getrequest(KTR_PSIG);
	if (req == NULL)
		return;
	kp = &req->ktr_data.ktr_psig;
	kp->signo = (char)sig;
	kp->action = action;
	kp->mask = *mask;
	kp->code = code;
	ktr_enqueuerequest(curthread, req);
}

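/*
 * Record a context switch (out of or back onto the CPU).  Sleeping is
 * not permitted here, so the record is queued for asynchronous
 * write-out.
 */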
void
ktrcsw(int out, int user)
{
	struct ktr_request *req;
	struct ktr_csw *kc;

	req = ktr_getrequest(KTR_CSW);
	if (req == NULL)
		return;
	kc = &req->ktr_data.ktr_csw;
	kc->out = out;
	kc->user = user;
	ktr_enqueuerequest(curthread, req);
}
#endif /* KTRACE */

/* Interface and common routines */

/*
 * ktrace system call
 *
 * MPSAFE
 */
#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
#endif
/* ARGSUSED */
int
ktrace(struct thread *td, struct ktrace_args *uap)
{
#ifdef KTRACE
	struct vnode *vp = NULL;
	struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int nfound, ret = 0;
	int flags, error = 0;
	struct nameidata nd;
	struct ucred *cred;

	/*
	 * Need something to (un)trace.
	 */
	if (ops != KTROP_CLEARFILE && facs == 0)
		return (EINVAL);

	ktrace_enter(td);
	if (ops != KTROP_CLEAR) {
		/*
		 * An operation that requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, uap->fname, td);
		flags = FREAD | FWRITE | O_NOFOLLOW;
		mtx_lock(&Giant);
		error = vn_open(&nd, &flags, 0, -1);
		if (error) {
			mtx_unlock(&Giant);
			ktrace_exit(td);
			return (error);
		}
		NDFREE(&nd, NDF_ONLY_PNBUF);
		vp = nd.ni_vp;
		VOP_UNLOCK(vp, 0, td);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
			mtx_unlock(&Giant);
			ktrace_exit(td);
			return (EACCES);
		}
		mtx_unlock(&Giant);
	}
	/*
	 * Clear all uses of the tracefile.
	 */
	if (ops == KTROP_CLEARFILE) {
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			PROC_LOCK(p);
			if (p->p_tracevp == vp) {
				if (ktrcanset(td, p)) {
					mtx_lock(&ktrace_mtx);
					cred = p->p_tracecred;
					p->p_tracecred = NULL;
					p->p_tracevp = NULL;
					p->p_traceflag = 0;
					mtx_unlock(&ktrace_mtx);
					PROC_UNLOCK(p);
					mtx_lock(&Giant);
					(void) vn_close(vp, FREAD|FWRITE,
					    cred, td);
					mtx_unlock(&Giant);
					crfree(cred);
				} else {
					PROC_UNLOCK(p);
					error = EPERM;
				}
			} else
				PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		goto done;
	}
	/*
	 * Apply the requested operation(s).
	 */
	sx_slock(&proctree_lock);
	if (uap->pid < 0) {
		/*
		 * By process group.
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		/*
		 * ktrops() may call vrele().  Lock pg_members
		 * by the proctree_lock rather than pg_mtx.
		 */
		PGRP_UNLOCK(pg);
		nfound = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p_cansee(td, p) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			PROC_UNLOCK(p);
			nfound++;
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, vp);
			else
				ret |= ktrops(td, p, ops, facs, vp);
		}
		if (nfound == 0) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
	} else {
		/*
		 * By pid.
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		error = p_cansee(td, p);
		/*
		 * The slock of the proctree lock will keep this process
		 * from going away, so unlocking the proc here is ok.
		 */
		PROC_UNLOCK(p);
		if (error) {
			sx_sunlock(&proctree_lock);
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, vp);
		else
			ret |= ktrops(td, p, ops, facs, vp);
	}
	sx_sunlock(&proctree_lock);
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL) {
		mtx_lock(&Giant);
		(void) vn_close(vp, FWRITE, td->td_ucred, td);
		mtx_unlock(&Giant);
	}
	ktrace_exit(td);
	return (error);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

/*
 * utrace system call
 *
 * MPSAFE
 */
/* ARGSUSED */
int
utrace(struct thread *td, struct utrace_args *uap)
{

#ifdef KTRACE
	struct ktr_request *req;
	void *cp;
	int error;

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	cp = malloc(uap->len, M_KTRACE, M_WAITOK);
	error = copyin(uap->addr, cp, uap->len);
	if (error) {
		free(cp, M_KTRACE);
		return (error);
	}
	req = ktr_getrequest(KTR_USER);
	if (req == NULL) {
		free(cp, M_KTRACE);
		return (ENOMEM);
	}
	req->ktr_buffer = cp;
	req->ktr_header.ktr_len = uap->len;
	ktr_submitrequest(td, req);
	return (0);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

#ifdef KTRACE
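/*
 * Apply a single KTROP_SET or KTROP_CLEAR operation to one process,
 * swapping in the new trace vnode and credential as needed.  Returns 1
 * on success, 0 if the caller may not change the target's trace state.
 */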
static int
ktrops(struct thread *td, struct proc *p, int ops, int facs,
    struct vnode *vp)
{
	struct vnode *tracevp = NULL;
	struct ucred *tracecred = NULL;

	PROC_LOCK(p);
	if (!ktrcanset(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_lock(&ktrace_mtx);
	if (ops == KTROP_SET) {
		if (p->p_tracevp != vp) {
			/*
			 * If the trace file is already in use, relinquish
			 * it below.
			 */
			tracevp = p->p_tracevp;
			VREF(vp);
			p->p_tracevp = vp;
		}
		if (p->p_tracecred != td->td_ucred) {
			tracecred = p->p_tracecred;
			p->p_tracecred = crhold(td->td_ucred);
		}
		p->p_traceflag |= facs;
		if (suser_cred(td->td_ucred, SUSER_ALLOWJAIL) == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* No more tracing. */
			p->p_traceflag = 0;
			tracevp = p->p_tracevp;
			p->p_tracevp = NULL;
			tracecred = p->p_tracecred;
			p->p_tracecred = NULL;
		}
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	if (tracevp != NULL) {
		int vfslocked;

		vfslocked = VFS_LOCK_GIANT(tracevp->v_mount);
		vrele(tracevp);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (tracecred != NULL)
		crfree(tracecred);

	return (1);
}

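/*
 * Apply an operation to process top and, iteratively, to every process
 * below it in the process tree.  Returns the OR of the per-process
 * ktrops() results.
 */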
static int
ktrsetchildren(struct thread *td, struct proc *top, int ops, int facs,
    struct vnode *vp)
{
	struct proc *p;
	int ret = 0;

	p = top;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

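/*
 * Write a single request to the trace vnode using a gathered write of
 * up to three iovecs: the fixed header, the fixed-size payload (if
 * any), and the variable-length buffer (if any).  On a write error,
 * tracing is disabled for every process using this vnode.
 */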
static void
ktr_writerequest(struct thread *td, struct ktr_request *req)
{
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	int datalen, buflen, vrele_count;
	int error;

	/*
	 * We hold the vnode and credential for use in I/O in case ktrace is
	 * disabled on the process as we write out the request.
	 *
	 * XXXRW: This is not ideal: we could end up performing a write after
	 * the vnode has been closed.
	 */
	mtx_lock(&ktrace_mtx);
	vp = td->td_proc->p_tracevp;
	if (vp != NULL)
		VREF(vp);
	cred = td->td_proc->p_tracecred;
	if (cred != NULL)
		crhold(cred);
	mtx_unlock(&ktrace_mtx);

	/*
	 * If vp is NULL, the vp has been cleared out from under this
	 * request, so just drop it.  Make sure the credential and vnode are
	 * in sync: we should have both or neither.
	 */
	if (vp == NULL) {
		KASSERT(cred == NULL, ("ktr_writerequest: cred != NULL"));
		return;
	}
	KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));

	kth = &req->ktr_header;
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
	buflen = kth->ktr_len;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}

	mtx_lock(&Giant);
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	(void)VOP_LEASE(vp, td, cred, LEASE_WRITE);
#ifdef MAC
	error = mac_check_vnode_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	vrele(vp);
	mtx_unlock(&Giant);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.  We defer
	 * all the vrele()'s on the vnode until after we are finished walking
	 * the various lists to avoid needlessly holding locks.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	vrele_count = 0;
	/*
	 * First, clear this vnode from being used by any processes in the
	 * system.
	 * XXX - If one process gets an EPERM writing to the vnode, should
	 *       we really do this?  Other processes might have suitable
	 *       credentials for the operation.
	 */
	cred = NULL;
	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		PROC_LOCK(p);
		if (p->p_tracevp == vp) {
			mtx_lock(&ktrace_mtx);
			p->p_tracevp = NULL;
			p->p_traceflag = 0;
			cred = p->p_tracecred;
			p->p_tracecred = NULL;
			mtx_unlock(&ktrace_mtx);
			vrele_count++;
		}
		PROC_UNLOCK(p);
		if (cred != NULL) {
			crfree(cred);
			cred = NULL;
		}
	}
	sx_sunlock(&allproc_lock);

	/*
	 * We can't clear any pending requests in threads that have cached
	 * them but not yet committed them, as those are per-thread.  The
	 * thread will have to clear it itself on system call return.
	 */
	mtx_lock(&Giant);
	while (vrele_count-- > 0)
		vrele(vp);
	mtx_unlock(&Giant);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(struct thread *td, struct proc *targetp)
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if (targetp->p_traceflag & KTRFAC_ROOT &&
	    suser_cred(td->td_ucred, SUSER_ALLOWJAIL))
		return (0);

	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}

#endif /* KTRACE */