/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.3/sys/kern/kern_ktrace.c 136588 2004-10-16 08:43:07Z cvs2svn $");

#include "opt_ktrace.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>

static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE

#ifndef KTRACE_REQUEST_POOL
#define	KTRACE_REQUEST_POOL	100
#endif

struct ktr_request {
	struct ktr_header ktr_header;
	struct ucred *ktr_cred;
	struct vnode *ktr_vp;
	union {
		struct ktr_syscall ktr_syscall;
		struct ktr_sysret ktr_sysret;
		struct ktr_genio ktr_genio;
		struct ktr_psig ktr_psig;
		struct ktr_csw ktr_csw;
	} ktr_data;
	STAILQ_ENTRY(ktr_request) ktr_list;
};

static int data_lengths[] = {
	0,					/* none */
	offsetof(struct ktr_syscall, ktr_args),	/* KTR_SYSCALL */
	sizeof(struct ktr_sysret),		/* KTR_SYSRET */
	0,					/* KTR_NAMEI */
	sizeof(struct ktr_genio),		/* KTR_GENIO */
	sizeof(struct ktr_psig),		/* KTR_PSIG */
	sizeof(struct ktr_csw),			/* KTR_CSW */
	0					/* KTR_USER */
};

static STAILQ_HEAD(, ktr_request) ktr_todo;
static STAILQ_HEAD(, ktr_request) ktr_free;

SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options");

static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);

static u_int ktr_geniosize = PAGE_SIZE;
TUNABLE_INT("kern.ktrace.genio_size", &ktr_geniosize);
SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RW, &ktr_geniosize,
    0, "Maximum size of genio event payload");

static int print_message = 1;
struct mtx ktrace_mtx;
static struct cv ktrace_cv;

static void ktrace_init(void *dummy);
static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
static u_int ktrace_resize_pool(u_int newsize);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct ktr_request *req);
static void ktr_freerequest(struct ktr_request *req);
static void ktr_loop(void *dummy);
static void ktr_writerequest(struct ktr_request *req);
static int ktrcanset(struct thread *,struct proc *);
static int ktrsetchildren(struct thread *,struct proc *,int,int,struct vnode *);
static int ktrops(struct thread *,struct proc *,int,int,struct vnode *);

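/*
 * Allocate the initial pool of request structures and start the worker
 * thread that writes completed requests to their trace vnodes.
 */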
static void
ktrace_init(void *dummy)
{
	struct ktr_request *req;
	int i;

	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
	cv_init(&ktrace_cv, "ktrace");
	STAILQ_INIT(&ktr_todo);
	STAILQ_INIT(&ktr_free);
	for (i = 0; i < ktr_requestpool; i++) {
		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK);
		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	}
	kthread_create(ktr_loop, NULL, NULL, RFHIGHPID, 0, "ktrace");
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);

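/*
 * Sysctl handler for kern.ktrace.request_pool: report the current pool
 * size, or resize the pool toward the requested value.
 */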
static int
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
{
	struct thread *td;
	u_int newsize, oldsize, wantsize;
	int error;

	/* Handle easy read-only case first to avoid warnings from GCC. */
	if (!req->newptr) {
		mtx_lock(&ktrace_mtx);
		oldsize = ktr_requestpool;
		mtx_unlock(&ktrace_mtx);
		return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
	}

	error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
	if (error)
		return (error);
	td = curthread;
	td->td_pflags |= TDP_INKTRACE;
	mtx_lock(&ktrace_mtx);
	oldsize = ktr_requestpool;
	newsize = ktrace_resize_pool(wantsize);
	mtx_unlock(&ktrace_mtx);
	td->td_pflags &= ~TDP_INKTRACE;
	error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
	if (error)
		return (error);
	if (wantsize > oldsize && newsize < wantsize)
		return (ENOSPC);
	return (0);
}
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool, CTLTYPE_UINT|CTLFLAG_RW,
    &ktr_requestpool, 0, sysctl_kern_ktrace_request_pool, "IU", "");

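/*
 * Grow or shrink the free request pool toward newsize.  Called with
 * ktrace_mtx held; the lock is dropped around malloc() and free().
 * Returns the resulting pool size.
 */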
static u_int
ktrace_resize_pool(u_int newsize)
{
	struct ktr_request *req;
	int bound;

	mtx_assert(&ktrace_mtx, MA_OWNED);
	print_message = 1;
	bound = newsize - ktr_requestpool;
	if (bound == 0)
		return (ktr_requestpool);
	if (bound < 0)
		/* Shrink pool down to newsize if possible. */
		while (bound++ < 0) {
			req = STAILQ_FIRST(&ktr_free);
			if (req == NULL)
				return (ktr_requestpool);
			STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
			ktr_requestpool--;
			mtx_unlock(&ktrace_mtx);
			free(req, M_KTRACE);
			mtx_lock(&ktrace_mtx);
		}
	else
		/* Grow pool up to newsize. */
		while (bound-- > 0) {
			mtx_unlock(&ktrace_mtx);
			req = malloc(sizeof(struct ktr_request), M_KTRACE,
			    M_WAITOK);
			mtx_lock(&ktrace_mtx);
			STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
			ktr_requestpool++;
		}
	return (ktr_requestpool);
}

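/*
 * Grab a free request if this event type is being traced for the current
 * process, stamp the header, and take references on the trace vnode and
 * credential.  If the pool is exhausted, mark the process so that a
 * KTR_DROP record is emitted later and return NULL.
 */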
static struct ktr_request *
ktr_getrequest(int type)
{
	struct ktr_request *req;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int pm;

	td->td_pflags |= TDP_INKTRACE;
	mtx_lock(&ktrace_mtx);
	if (!KTRCHECK(td, type)) {
		mtx_unlock(&ktrace_mtx);
		td->td_pflags &= ~TDP_INKTRACE;
		return (NULL);
	}
	req = STAILQ_FIRST(&ktr_free);
	if (req != NULL) {
		STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
		req->ktr_header.ktr_type = type;
		if (p->p_traceflag & KTRFAC_DROP) {
			req->ktr_header.ktr_type |= KTR_DROP;
			p->p_traceflag &= ~KTRFAC_DROP;
		}
		KASSERT(p->p_tracevp != NULL, ("ktrace: no trace vnode"));
		KASSERT(p->p_tracecred != NULL, ("ktrace: no trace cred"));
		req->ktr_vp = p->p_tracevp;
		VREF(p->p_tracevp);
		req->ktr_cred = crhold(p->p_tracecred);
		mtx_unlock(&ktrace_mtx);
		microtime(&req->ktr_header.ktr_time);
		req->ktr_header.ktr_pid = p->p_pid;
		bcopy(p->p_comm, req->ktr_header.ktr_comm, MAXCOMLEN + 1);
		req->ktr_header.ktr_buffer = NULL;
		req->ktr_header.ktr_len = 0;
	} else {
		p->p_traceflag |= KTRFAC_DROP;
		pm = print_message;
		print_message = 0;
		mtx_unlock(&ktrace_mtx);
		if (pm)
			printf("Out of ktrace request objects.\n");
		td->td_pflags &= ~TDP_INKTRACE;
	}
	return (req);
}

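/*
 * Queue a completed request for the worker thread and wake it up.
 */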
static void
ktr_submitrequest(struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_TAIL(&ktr_todo, req, ktr_list);
	cv_signal(&ktrace_cv);
	mtx_unlock(&ktrace_mtx);
	curthread->td_pflags &= ~TDP_INKTRACE;
}

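/*
 * Drop the request's vnode and credential references, free any attached
 * data buffer, and return the request to the free pool.
 */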
static void
ktr_freerequest(struct ktr_request *req)
{

	crfree(req->ktr_cred);
	if (req->ktr_vp != NULL) {
		mtx_lock(&Giant);
		vrele(req->ktr_vp);
		mtx_unlock(&Giant);
	}
	if (req->ktr_header.ktr_buffer != NULL)
		free(req->ktr_header.ktr_buffer, M_KTRACE);
	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
}

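/*
 * Main loop of the ktrace worker thread: wait for queued requests, write
 * each one out, and recycle it.
 */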
static void
ktr_loop(void *dummy)
{
	struct ktr_request *req;
	struct thread *td;
	struct ucred *cred;

	/* Only cache these values once. */
	td = curthread;
	cred = td->td_ucred;
	for (;;) {
		mtx_lock(&ktrace_mtx);
		while (STAILQ_EMPTY(&ktr_todo))
			cv_wait(&ktrace_cv, &ktrace_mtx);
		req = STAILQ_FIRST(&ktr_todo);
		STAILQ_REMOVE_HEAD(&ktr_todo, ktr_list);
		KASSERT(req != NULL, ("got a NULL request"));
		mtx_unlock(&ktrace_mtx);
		/*
		 * It is not enough just to pass the cached cred
		 * to the VOP's in ktr_writerequest().  Some VFS
		 * operations use curthread->td_ucred, so we need
		 * to modify our thread's credentials as well.
		 * Evil.
		 */
		td->td_ucred = req->ktr_cred;
		ktr_writerequest(req);
		td->td_ucred = cred;
		ktr_freerequest(req);
	}
}

/*
 * MPSAFE
 */
void
ktrsyscall(code, narg, args)
	int code, narg;
	register_t args[];
{
	struct ktr_request *req;
	struct ktr_syscall *ktp;
	size_t buflen;
	char *buf = NULL;

	buflen = sizeof(register_t) * narg;
	if (buflen > 0) {
		buf = malloc(buflen, M_KTRACE, M_WAITOK);
		bcopy(args, buf, buflen);
	}
	req = ktr_getrequest(KTR_SYSCALL);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	ktp = &req->ktr_data.ktr_syscall;
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	if (buflen > 0) {
		req->ktr_header.ktr_len = buflen;
		req->ktr_header.ktr_buffer = buf;
	}
	ktr_submitrequest(req);
}

/*
 * MPSAFE
 */
void
ktrsysret(code, error, retval)
	int code, error;
	register_t retval;
{
	struct ktr_request *req;
	struct ktr_sysret *ktp;

	req = ktr_getrequest(KTR_SYSRET);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_sysret;
	ktp->ktr_code = code;
	ktp->ktr_error = error;
	ktp->ktr_retval = retval;		/* what about val2 ? */
	ktr_submitrequest(req);
}

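/*
 * Record a namei (pathname lookup) event.
 */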
void
ktrnamei(path)
	char *path;
{
	struct ktr_request *req;
	int namelen;
	char *buf = NULL;

	namelen = strlen(path);
	if (namelen > 0) {
		buf = malloc(namelen, M_KTRACE, M_WAITOK);
		bcopy(path, buf, namelen);
	}
	req = ktr_getrequest(KTR_NAMEI);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	if (namelen > 0) {
		req->ktr_header.ktr_len = namelen;
		req->ktr_header.ktr_buffer = buf;
	}
	ktr_submitrequest(req);
}

/*
 * Because the uio may not remain valid after we return, copy the I/O
 * data into a private buffer synchronously here.  The request itself is
 * then queued to the ktrace worker thread like any other event, so the
 * relative order of records in the trace file is preserved.
 */
void
ktrgenio(fd, rw, uio, error)
	int fd;
	enum uio_rw rw;
	struct uio *uio;
	int error;
{
	struct ktr_request *req;
	struct ktr_genio *ktg;
	int datalen;
	char *buf;

	if (error) {
		free(uio, M_IOV);
		return;
	}
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;
	datalen = imin(uio->uio_resid, ktr_geniosize);
	buf = malloc(datalen, M_KTRACE, M_WAITOK);
	error = uiomove(buf, datalen, uio);
	free(uio, M_IOV);
	if (error) {
		free(buf, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_GENIO);
	if (req == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ktg = &req->ktr_data.ktr_genio;
	ktg->ktr_fd = fd;
	ktg->ktr_rw = rw;
	req->ktr_header.ktr_len = datalen;
	req->ktr_header.ktr_buffer = buf;
	ktr_submitrequest(req);
}

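/*
 * Record the delivery of a signal to the current process.
 */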
void
ktrpsig(sig, action, mask, code)
	int sig;
	sig_t action;
	sigset_t *mask;
	int code;
{
	struct ktr_request *req;
	struct ktr_psig *kp;

	req = ktr_getrequest(KTR_PSIG);
	if (req == NULL)
		return;
	kp = &req->ktr_data.ktr_psig;
	kp->signo = (char)sig;
	kp->action = action;
	kp->mask = *mask;
	kp->code = code;
	ktr_submitrequest(req);
}

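/*
 * Record a context switch event.
 */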
void
ktrcsw(out, user)
	int out, user;
{
	struct ktr_request *req;
	struct ktr_csw *kc;

	req = ktr_getrequest(KTR_CSW);
	if (req == NULL)
		return;
	kc = &req->ktr_data.ktr_csw;
	kc->out = out;
	kc->user = user;
	ktr_submitrequest(req);
}
#endif /* KTRACE */

/* Interface and common routines */

/*
 * ktrace system call
 *
 * MPSAFE
 */
#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
#endif
/* ARGSUSED */
int
ktrace(td, uap)
	struct thread *td;
	register struct ktrace_args *uap;
{
#ifdef KTRACE
	register struct vnode *vp = NULL;
	register struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int ret = 0;
	int flags, error = 0;
	struct nameidata nd;
	struct ucred *cred;

	/*
	 * Need something to (un)trace.
	 */
	if (ops != KTROP_CLEARFILE && facs == 0)
		return (EINVAL);

	td->td_pflags |= TDP_INKTRACE;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, uap->fname, td);
		flags = FREAD | FWRITE | O_NOFOLLOW;
		mtx_lock(&Giant);
		error = vn_open(&nd, &flags, 0, -1);
		if (error) {
			mtx_unlock(&Giant);
			td->td_pflags &= ~TDP_INKTRACE;
			return (error);
		}
		NDFREE(&nd, NDF_ONLY_PNBUF);
		vp = nd.ni_vp;
		VOP_UNLOCK(vp, 0, td);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
			mtx_unlock(&Giant);
			td->td_pflags &= ~TDP_INKTRACE;
			return (EACCES);
		}
		mtx_unlock(&Giant);
	}
	/*
	 * Clear all uses of the tracefile.
	 */
	if (ops == KTROP_CLEARFILE) {
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			PROC_LOCK(p);
			if (p->p_tracevp == vp) {
				if (ktrcanset(td, p)) {
					mtx_lock(&ktrace_mtx);
					cred = p->p_tracecred;
					p->p_tracecred = NULL;
					p->p_tracevp = NULL;
					p->p_traceflag = 0;
					mtx_unlock(&ktrace_mtx);
					PROC_UNLOCK(p);
					mtx_lock(&Giant);
					(void) vn_close(vp, FREAD|FWRITE,
					    cred, td);
					mtx_unlock(&Giant);
					crfree(cred);
				} else {
					PROC_UNLOCK(p);
					error = EPERM;
				}
			} else
				PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		goto done;
	}
	/*
	 * do it
	 */
	sx_slock(&proctree_lock);
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		/*
		 * ktrops() may call vrele(). Lock pg_members
		 * by the proctree_lock rather than pg_mtx.
		 */
		PGRP_UNLOCK(pg);
		LIST_FOREACH(p, &pg->pg_members, p_pglist)
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, vp);
			else
				ret |= ktrops(td, p, ops, facs, vp);
	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		/*
		 * The slock of the proctree lock will keep this process
		 * from going away, so unlocking the proc here is ok.
		 */
		PROC_UNLOCK(p);
		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, vp);
		else
			ret |= ktrops(td, p, ops, facs, vp);
	}
	sx_sunlock(&proctree_lock);
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL) {
		mtx_lock(&Giant);
		(void) vn_close(vp, FWRITE, td->td_ucred, td);
		mtx_unlock(&Giant);
	}
	td->td_pflags &= ~TDP_INKTRACE;
	return (error);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

/*
 * utrace system call
 *
 * MPSAFE
 */
/* ARGSUSED */
int
utrace(td, uap)
	struct thread *td;
	register struct utrace_args *uap;
{

#ifdef KTRACE
	struct ktr_request *req;
	void *cp;
	int error;

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	cp = malloc(uap->len, M_KTRACE, M_WAITOK);
	error = copyin(uap->addr, cp, uap->len);
	if (error) {
		free(cp, M_KTRACE);
		return (error);
	}
	req = ktr_getrequest(KTR_USER);
	if (req == NULL) {
		free(cp, M_KTRACE);
		return (ENOMEM);
	}
	req->ktr_header.ktr_buffer = cp;
	req->ktr_header.ktr_len = uap->len;
	ktr_submitrequest(req);
	return (0);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

#ifdef KTRACE
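/*
 * Apply a single set/clear trace operation to one process, updating its
 * trace vnode, credential, and facility flags.  Returns 1 on success, or
 * 0 if the caller may not change the target's tracing state.
 */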
static int
ktrops(td, p, ops, facs, vp)
	struct thread *td;
	struct proc *p;
	int ops, facs;
	struct vnode *vp;
{
	struct vnode *tracevp = NULL;
	struct ucred *tracecred = NULL;

	PROC_LOCK(p);
	if (!ktrcanset(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_lock(&ktrace_mtx);
	if (ops == KTROP_SET) {
		if (p->p_tracevp != vp) {
			/*
			 * if trace file already in use, relinquish below
			 */
			tracevp = p->p_tracevp;
			VREF(vp);
			p->p_tracevp = vp;
		}
		if (p->p_tracecred != td->td_ucred) {
			tracecred = p->p_tracecred;
			p->p_tracecred = crhold(td->td_ucred);
		}
		p->p_traceflag |= facs;
		if (td->td_ucred->cr_uid == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			tracevp = p->p_tracevp;
			p->p_tracevp = NULL;
			tracecred = p->p_tracecred;
			p->p_tracecred = NULL;
		}
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	if (tracevp != NULL) {
		mtx_lock(&Giant);
		vrele(tracevp);
		mtx_unlock(&Giant);
	}
	if (tracecred != NULL)
		crfree(tracecred);

	return (1);
}

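/*
 * Apply the trace operation to a process and all of its descendants.
 */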
static int
ktrsetchildren(td, top, ops, facs, vp)
	struct thread *td;
	struct proc *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

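/*
 * Write a single request to its trace vnode.  On a write error, disable
 * tracing to this vnode for all processes and discard any other pending
 * requests that reference it.
 */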
static void
ktr_writerequest(struct ktr_request *req)
{
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct thread *td;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	int datalen, buflen, vrele_count;
	int error;

	vp = req->ktr_vp;
	/*
	 * If vp is NULL, the vp has been cleared out from under this
	 * request, so just drop it.
	 */
	if (vp == NULL)
		return;
	kth = &req->ktr_header;
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
	buflen = kth->ktr_len;
	cred = req->ktr_cred;
	td = curthread;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(kth->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = kth->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}
	mtx_lock(&Giant);
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	(void)VOP_LEASE(vp, td, cred, LEASE_WRITE);
#ifdef MAC
	error = mac_check_vnode_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	mtx_unlock(&Giant);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.  We defer
	 * all the vrele()'s on the vnode until after we are finished walking
	 * the various lists to avoid needlessly holding locks.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	vrele_count = 0;
	/*
	 * First, clear this vnode from being used by any processes in the
	 * system.
	 * XXX - If one process gets an EPERM writing to the vnode, should
	 * we really do this?  Other processes might have suitable
	 * credentials for the operation.
	 */
	cred = NULL;
	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		PROC_LOCK(p);
		if (p->p_tracevp == vp) {
			mtx_lock(&ktrace_mtx);
			p->p_tracevp = NULL;
			p->p_traceflag = 0;
			cred = p->p_tracecred;
			p->p_tracecred = NULL;
			mtx_unlock(&ktrace_mtx);
			vrele_count++;
		}
		PROC_UNLOCK(p);
		if (cred != NULL) {
			crfree(cred);
			cred = NULL;
		}
	}
	sx_sunlock(&allproc_lock);
	/*
	 * Second, clear this vnode from any pending requests.
	 */
	mtx_lock(&ktrace_mtx);
	STAILQ_FOREACH(req, &ktr_todo, ktr_list) {
		if (req->ktr_vp == vp) {
			req->ktr_vp = NULL;
			vrele_count++;
		}
	}
	mtx_unlock(&ktrace_mtx);
	mtx_lock(&Giant);
	while (vrele_count-- > 0)
		vrele(vp);
	mtx_unlock(&Giant);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(td, targetp)
	struct thread *td;
	struct proc *targetp;
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if (targetp->p_traceflag & KTRFAC_ROOT &&
	    suser_cred(td->td_ucred, SUSER_ALLOWJAIL))
		return (0);

	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}

#endif /* KTRACE */