/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/ktrace.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>

#include <security/mac/mac_framework.h>
/*
 * The ktrace facility allows the tracing of certain key events in user space
 * processes, such as system calls, signal delivery, context switches, and
 * user-generated events using utrace(2).  It works by streaming event
 * records and data to a vnode associated with the process using the
 * ktrace(2) system call.  In general, records can be written directly from
 * the context that generates the event.  One important exception to this is
 * during a context switch, where sleeping is not permitted.  To handle this
 * case, trace events are generated using in-kernel ktr_request records, and
 * then delivered to disk at a convenient moment -- either immediately, at
 * the next traceable event, at system call return, or at process exit.
 *
 * When dealing with multiple threads or processes writing to the same event
 * log, ordering guarantees are weak: specifically, if an event has multiple
 * records (e.g., system call enter and return), they may be interleaved with
 * records from another event.  Process and thread ID information is provided
 * in the record, and user applications can de-interleave events if required.
 */
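
/*
 * Illustrative userspace sketch (not compiled here; see ktrace(2) and
 * utrace(2) for the authoritative interfaces).  A process might trace
 * its own system calls to an arbitrary example file "trace.out" and
 * emit one custom KTR_USER record:
 *
 *	#include <sys/param.h>
 *	#include <sys/time.h>
 *	#include <sys/uio.h>
 *	#include <sys/ktrace.h>
 *	#include <unistd.h>
 *
 *	ktrace("trace.out", KTROP_SET,
 *	    KTRFAC_SYSCALL | KTRFAC_SYSRET, getpid());
 *	utrace("hello", 5);
 *	ktrace(NULL, KTROP_CLEAR,
 *	    KTRFAC_SYSCALL | KTRFAC_SYSRET, getpid());
 */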

static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE

#ifndef KTRACE_REQUEST_POOL
#define	KTRACE_REQUEST_POOL	100
#endif

struct ktr_request {
	struct ktr_header ktr_header;
	void	*ktr_buffer;
	union {
		struct ktr_syscall ktr_syscall;
		struct ktr_sysret ktr_sysret;
		struct ktr_genio ktr_genio;
		struct ktr_psig ktr_psig;
		struct ktr_csw ktr_csw;
	} ktr_data;
	STAILQ_ENTRY(ktr_request) ktr_list;
};
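
/*
 * Lengths of the fixed-size payload in ktr_data for each trace record
 * type, indexed by KTR_* type number.  Types that carry only a
 * variable-length buffer (or nothing at all) use zero.
 */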
static int data_lengths[] = {
	0,					/* none */
	offsetof(struct ktr_syscall, ktr_args),	/* KTR_SYSCALL */
	sizeof(struct ktr_sysret),		/* KTR_SYSRET */
	0,					/* KTR_NAMEI */
	sizeof(struct ktr_genio),		/* KTR_GENIO */
	sizeof(struct ktr_psig),		/* KTR_PSIG */
	sizeof(struct ktr_csw),			/* KTR_CSW */
	0,					/* KTR_USER */
	0,					/* KTR_STRUCT */
	0,					/* KTR_SYSCTL */
};

static STAILQ_HEAD(, ktr_request) ktr_free;

static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options");

static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);

static u_int ktr_geniosize = PAGE_SIZE;
TUNABLE_INT("kern.ktrace.genio_size", &ktr_geniosize);
SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RW, &ktr_geniosize,
    0, "Maximum size of genio event payload");

static int print_message = 1;
static struct mtx ktrace_mtx;
static struct sx ktrace_sx;

static void ktrace_init(void *dummy);
static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
static u_int ktrace_resize_pool(u_int newsize);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
static void ktr_freeproc(struct proc *p, struct ucred **uc,
    struct vnode **vp);
static void ktr_freerequest(struct ktr_request *req);
static void ktr_freerequest_locked(struct ktr_request *req);
static void ktr_writerequest(struct thread *td, struct ktr_request *req);
static int ktrcanset(struct thread *, struct proc *);
static int ktrsetchildren(struct thread *, struct proc *, int, int,
    struct vnode *);
static int ktrops(struct thread *, struct proc *, int, int, struct vnode *);

/*
 * ktrace itself generates events, such as context switches, which we do not
 * wish to trace.  Maintain a flag, TDP_INKTRACE, on each thread to determine
 * whether or not it is in a region where tracing of events should be
 * suppressed.
 */
static void
ktrace_enter(struct thread *td)
{

	KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set"));
	td->td_pflags |= TDP_INKTRACE;
}

static void
ktrace_exit(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set"));
	td->td_pflags &= ~TDP_INKTRACE;
}

static void
ktrace_assert(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
}

static void
ktrace_init(void *dummy)
{
	struct ktr_request *req;
	int i;

	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
	sx_init(&ktrace_sx, "ktrace_sx");
	STAILQ_INIT(&ktr_free);
	for (i = 0; i < ktr_requestpool; i++) {
		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK);
		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	}
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);
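
/*
 * The request pool can be resized at runtime via the
 * kern.ktrace.request_pool sysctl.  Report the old size and, when a new
 * size is supplied, attempt to resize the pool, returning ENOSPC if the
 * full requested growth could not be achieved.
 */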
static int
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
{
	struct thread *td;
	u_int newsize, oldsize, wantsize;
	int error;

	/* Handle easy read-only case first to avoid warnings from GCC. */
	if (!req->newptr) {
		mtx_lock(&ktrace_mtx);
		oldsize = ktr_requestpool;
		mtx_unlock(&ktrace_mtx);
		return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
	}

	error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
	if (error)
		return (error);
	td = curthread;
	ktrace_enter(td);
	mtx_lock(&ktrace_mtx);
	oldsize = ktr_requestpool;
	newsize = ktrace_resize_pool(wantsize);
	mtx_unlock(&ktrace_mtx);
	ktrace_exit(td);
	error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
	if (error)
		return (error);
	if (wantsize > oldsize && newsize < wantsize)
		return (ENOSPC);
	return (0);
}
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool, CTLTYPE_UINT|CTLFLAG_RW,
    &ktr_requestpool, 0, sysctl_kern_ktrace_request_pool, "IU", "");
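
/*
 * Resize the free request pool to newsize entries.  The ktrace mutex is
 * dropped and reacquired around each malloc() and free() so that the
 * allocator is never called with it held.  Shrinking stops early if the
 * free list empties, i.e., if the remaining requests are still in use.
 */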
static u_int
ktrace_resize_pool(u_int newsize)
{
	struct ktr_request *req;
	int bound;

	mtx_assert(&ktrace_mtx, MA_OWNED);
	print_message = 1;
	bound = newsize - ktr_requestpool;
	if (bound == 0)
		return (ktr_requestpool);
	if (bound < 0)
		/* Shrink pool down to newsize if possible. */
		while (bound++ < 0) {
			req = STAILQ_FIRST(&ktr_free);
			if (req == NULL)
				return (ktr_requestpool);
			STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
			ktr_requestpool--;
			mtx_unlock(&ktrace_mtx);
			free(req, M_KTRACE);
			mtx_lock(&ktrace_mtx);
		}
	else
		/* Grow pool up to newsize. */
		while (bound-- > 0) {
			mtx_unlock(&ktrace_mtx);
			req = malloc(sizeof(struct ktr_request), M_KTRACE,
			    M_WAITOK);
			mtx_lock(&ktrace_mtx);
			STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
			ktr_requestpool++;
		}
	return (ktr_requestpool);
}

/* ktr_getrequest() assumes that ktr_comm[] is the same size as p_comm[]. */
CTASSERT(sizeof(((struct ktr_header *)NULL)->ktr_comm) ==
    sizeof(((struct proc *)NULL)->p_comm));
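
/*
 * Allocate a request from the free pool, stamping it with the event type,
 * a timestamp, and the identity of the current process and thread.
 * Returns NULL if tracing of this event is disabled.  If the pool is
 * exhausted, also returns NULL and marks the process with KTRFAC_DROP so
 * that the next successfully allocated record is flagged KTR_DROP.
 */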
static struct ktr_request *
ktr_getrequest(int type)
{
	struct ktr_request *req;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int pm;

	ktrace_enter(td);	/* XXX: In caller instead? */
	mtx_lock(&ktrace_mtx);
	if (!KTRCHECK(td, type)) {
		mtx_unlock(&ktrace_mtx);
		ktrace_exit(td);
		return (NULL);
	}
	req = STAILQ_FIRST(&ktr_free);
	if (req != NULL) {
		STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
		req->ktr_header.ktr_type = type;
		if (p->p_traceflag & KTRFAC_DROP) {
			req->ktr_header.ktr_type |= KTR_DROP;
			p->p_traceflag &= ~KTRFAC_DROP;
		}
		mtx_unlock(&ktrace_mtx);
		microtime(&req->ktr_header.ktr_time);
		req->ktr_header.ktr_pid = p->p_pid;
		req->ktr_header.ktr_tid = td->td_tid;
		bcopy(p->p_comm, req->ktr_header.ktr_comm,
		    sizeof(req->ktr_header.ktr_comm));
		req->ktr_buffer = NULL;
		req->ktr_header.ktr_len = 0;
	} else {
		p->p_traceflag |= KTRFAC_DROP;
		pm = print_message;
		print_message = 0;
		mtx_unlock(&ktrace_mtx);
		if (pm)
			printf("Out of ktrace request objects.\n");
		ktrace_exit(td);
	}
	return (req);
}

/*
 * Some trace generation environments don't permit direct access to VFS,
 * such as during a context switch where sleeping is not allowed.  Under
 * these circumstances, queue the request on the process so that it can be
 * written asynchronously later.
 */
static void
ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
	ktrace_exit(td);
}

/*
 * Drain any pending ktrace records from the per-process queue to disk.
 * This is used both internally before committing other records and on
 * system call return.  We drain all the records found at the time the
 * drain is requested, but do not keep draining after that, as later
 * arrivals may be logically "after" the current event.
 */
static void
ktr_drain(struct thread *td)
{
	struct ktr_request *queued_req;
	STAILQ_HEAD(, ktr_request) local_queue;

	ktrace_assert(td);
	sx_assert(&ktrace_sx, SX_XLOCKED);

	STAILQ_INIT(&local_queue);	/* XXXRW: needed? */

	if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
		mtx_unlock(&ktrace_mtx);

		while ((queued_req = STAILQ_FIRST(&local_queue))) {
			STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
			ktr_writerequest(td, queued_req);
			ktr_freerequest(queued_req);
		}
	}
}

/*
 * Submit a trace record for immediate commit to disk -- to be used only
 * where entering VFS is OK.  First drain any pending records that may have
 * been cached in the thread.
 */
static void
ktr_submitrequest(struct thread *td, struct ktr_request *req)
{

	ktrace_assert(td);

	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	ktr_writerequest(td, req);
	ktr_freerequest(req);
	sx_xunlock(&ktrace_sx);

	ktrace_exit(td);
}
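
/*
 * Return a request to the free pool, releasing any variable-length buffer
 * attached to it.  The _locked variant is for callers already holding the
 * ktrace mutex.
 */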
static void
ktr_freerequest(struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	ktr_freerequest_locked(req);
	mtx_unlock(&ktrace_mtx);
}

static void
ktr_freerequest_locked(struct ktr_request *req)
{

	mtx_assert(&ktrace_mtx, MA_OWNED);
	if (req->ktr_buffer != NULL)
		free(req->ktr_buffer, M_KTRACE);
	STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
}

/*
 * Disable tracing for a process and release all associated resources.
 * The caller is responsible for releasing a reference on the returned
 * vnode and credentials.
 */
static void
ktr_freeproc(struct proc *p, struct ucred **uc, struct vnode **vp)
{
	struct ktr_request *req;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&ktrace_mtx, MA_OWNED);
	*uc = p->p_tracecred;
	p->p_tracecred = NULL;
	if (vp != NULL)
		*vp = p->p_tracevp;
	p->p_tracevp = NULL;
	p->p_traceflag = 0;
	while ((req = STAILQ_FIRST(&p->p_ktr)) != NULL) {
		STAILQ_REMOVE_HEAD(&p->p_ktr, ktr_list);
		ktr_freerequest_locked(req);
	}
}
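
/*
 * Record a system call entry: the fixed ktr_syscall payload carries the
 * syscall code and argument count, while the arguments themselves travel
 * in a separately allocated buffer.
 */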
void
ktrsyscall(int code, int narg, register_t args[])
{
	struct ktr_request *req;
	struct ktr_syscall *ktp;
	size_t buflen;
	char *buf = NULL;

	buflen = sizeof(register_t) * narg;
	if (buflen > 0) {
		buf = malloc(buflen, M_KTRACE, M_WAITOK);
		bcopy(args, buf, buflen);
	}
	req = ktr_getrequest(KTR_SYSCALL);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	ktp = &req->ktr_data.ktr_syscall;
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	if (buflen > 0) {
		req->ktr_header.ktr_len = buflen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}
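
/*
 * Record a system call return, including the error number and, when no
 * error occurred, the primary return value.
 */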
void
ktrsysret(int code, int error, register_t retval)
{
	struct ktr_request *req;
	struct ktr_sysret *ktp;

	req = ktr_getrequest(KTR_SYSRET);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_sysret;
	ktp->ktr_code = code;
	ktp->ktr_error = error;
	ktp->ktr_retval = ((error == 0) ? retval : 0);	/* what about val2? */
	ktr_submitrequest(curthread, req);
}

/*
 * When a setuid process execs, disable tracing.
 *
 * XXX: We toss any pending asynchronous records.
 */
void
ktrprocexec(struct proc *p, struct ucred **uc, struct vnode **vp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_lock(&ktrace_mtx);
	ktr_freeproc(p, uc, vp);
	mtx_unlock(&ktrace_mtx);
}

/*
 * When a process exits, drain per-process asynchronous trace records
 * and disable tracing.
 */
void
ktrprocexit(struct thread *td)
{
	struct proc *p;
	struct ucred *cred;
	struct vnode *vp;
	int vfslocked;

	p = td->td_proc;
	if (p->p_traceflag == 0)
		return;

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	PROC_LOCK(p);
	mtx_lock(&ktrace_mtx);
	ktr_freeproc(p, &cred, &vp);
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	if (vp != NULL) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vrele(vp);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (cred != NULL)
		crfree(cred);
	ktrace_exit(td);
}

/*
 * When a process forks, enable tracing in the new process if needed.
 */
void
ktrprocfork(struct proc *p1, struct proc *p2)
{

	PROC_LOCK_ASSERT(p1, MA_OWNED);
	PROC_LOCK_ASSERT(p2, MA_OWNED);
	mtx_lock(&ktrace_mtx);
	KASSERT(p2->p_tracevp == NULL, ("new process has a ktrace vnode"));
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracevp = p1->p_tracevp) != NULL) {
			VREF(p2->p_tracevp);
			KASSERT(p1->p_tracecred != NULL,
			    ("ktrace vnode with no cred"));
			p2->p_tracecred = crhold(p1->p_tracecred);
		}
	}
	mtx_unlock(&ktrace_mtx);
}

/*
 * When a thread returns to user space, drain any asynchronous records
 * generated by the system call.
 */
void
ktruserret(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}
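
/*
 * Record a path name looked up via namei(); the path is carried in the
 * variable-length buffer with no fixed payload.
 */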
void
ktrnamei(char *path)
{
	struct ktr_request *req;
	int namelen;
	char *buf = NULL;

	namelen = strlen(path);
	if (namelen > 0) {
		buf = malloc(namelen, M_KTRACE, M_WAITOK);
		bcopy(path, buf, namelen);
	}
	req = ktr_getrequest(KTR_NAMEI);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	if (namelen > 0) {
		req->ktr_header.ktr_len = namelen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}
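
/*
 * Record a sysctl invocation: translate the numeric MIB to its string
 * name using the kernel's name-lookup MIB (0.1) and log the name.
 */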
void
ktrsysctl(int *name, u_int namelen)
{
	struct ktr_request *req;
	u_int mib[CTL_MAXNAME + 2];
	char *mibname;
	size_t mibnamelen;
	int error;

	/* Look up the name of the MIB. */
	KASSERT(namelen <= CTL_MAXNAME, ("sysctl MIB too long"));
	mib[0] = 0;
	mib[1] = 1;
	bcopy(name, mib + 2, namelen * sizeof(*name));
	mibnamelen = 128;
	mibname = malloc(mibnamelen, M_KTRACE, M_WAITOK);
	error = kernel_sysctl(curthread, mib, namelen + 2, mibname,
	    &mibnamelen, NULL, 0, &mibnamelen, 0);
	if (error) {
		free(mibname, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_SYSCTL);
	if (req == NULL) {
		free(mibname, M_KTRACE);
		return;
	}
	req->ktr_header.ktr_len = mibnamelen;
	req->ktr_buffer = mibname;
	ktr_submitrequest(curthread, req);
}
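
/*
 * Record the data transferred by a read or write.  The caller passes a
 * private copy of the uio, which is consumed and freed here; the payload
 * is truncated to the kern.ktrace.genio_size limit.
 */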
void
ktrgenio(int fd, enum uio_rw rw, struct uio *uio, int error)
{
	struct ktr_request *req;
	struct ktr_genio *ktg;
	int datalen;
	char *buf;

	if (error) {
		free(uio, M_IOV);
		return;
	}
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;
	datalen = imin(uio->uio_resid, ktr_geniosize);
	buf = malloc(datalen, M_KTRACE, M_WAITOK);
	error = uiomove(buf, datalen, uio);
	free(uio, M_IOV);
	if (error) {
		free(buf, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_GENIO);
	if (req == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ktg = &req->ktr_data.ktr_genio;
	ktg->ktr_fd = fd;
	ktg->ktr_rw = rw;
	req->ktr_header.ktr_len = datalen;
	req->ktr_buffer = buf;
	ktr_submitrequest(curthread, req);
}
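
/*
 * Record signal delivery.  The record is queued on the process rather
 * than written immediately, since signal delivery can occur in contexts
 * where writing to the trace vnode is not safe.
 */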
void
ktrpsig(int sig, sig_t action, sigset_t *mask, int code)
{
	struct ktr_request *req;
	struct ktr_psig *kp;

	req = ktr_getrequest(KTR_PSIG);
	if (req == NULL)
		return;
	kp = &req->ktr_data.ktr_psig;
	kp->signo = (char)sig;
	kp->action = action;
	kp->mask = *mask;
	kp->code = code;
	ktr_enqueuerequest(curthread, req);
}
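
/*
 * Record a context switch.  Sleeping is not permitted here, so the record
 * is queued for later delivery.
 */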
void
ktrcsw(int out, int user)
{
	struct ktr_request *req;
	struct ktr_csw *kc;

	req = ktr_getrequest(KTR_CSW);
	if (req == NULL)
		return;
	kc = &req->ktr_data.ktr_csw;
	kc->out = out;
	kc->user = user;
	ktr_enqueuerequest(curthread, req);
}
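
/*
 * Record a kernel structure: the buffer holds the NUL-terminated structure
 * name followed immediately by the raw structure contents.
 */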
void
ktrstruct(const char *name, size_t namelen, void *data, size_t datalen)
{
	struct ktr_request *req;
	char *buf = NULL;
	size_t buflen;

	if (!data)
		datalen = 0;
	buflen = namelen + 1 + datalen;
	buf = malloc(buflen, M_KTRACE, M_WAITOK);
	bcopy(name, buf, namelen);
	buf[namelen] = '\0';
	bcopy(data, buf + namelen + 1, datalen);
	if ((req = ktr_getrequest(KTR_STRUCT)) == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	req->ktr_buffer = buf;
	req->ktr_header.ktr_len = buflen;
	ktr_submitrequest(curthread, req);
}
#endif /* KTRACE */

/* Interface and common routines */

#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
#endif
/* ARGSUSED */
int
ktrace(struct thread *td, struct ktrace_args *uap)
{
#ifdef KTRACE
	struct vnode *vp = NULL;
	struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int nfound, ret = 0;
	int flags, error = 0, vfslocked;
	struct nameidata nd;
	struct ucred *cred;

	/*
	 * Need something to (un)trace.
	 */
	if (ops != KTROP_CLEARFILE && facs == 0)
		return (EINVAL);

	ktrace_enter(td);
	if (ops != KTROP_CLEAR) {
		/*
		 * An operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, NOFOLLOW | MPSAFE, UIO_USERSPACE,
		    uap->fname, td);
		flags = FREAD | FWRITE | O_NOFOLLOW;
		error = vn_open(&nd, &flags, 0, NULL);
		if (error) {
			ktrace_exit(td);
			return (error);
		}
		vfslocked = NDHASGIANT(&nd);
		NDFREE(&nd, NDF_ONLY_PNBUF);
		vp = nd.ni_vp;
		VOP_UNLOCK(vp, 0, td);
		if (vp->v_type != VREG) {
			(void)vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
			VFS_UNLOCK_GIANT(vfslocked);
			ktrace_exit(td);
			return (EACCES);
		}
		VFS_UNLOCK_GIANT(vfslocked);
	}
	/*
	 * Clear all uses of the tracefile.
	 */
	if (ops == KTROP_CLEARFILE) {
		int vrele_count;

		vrele_count = 0;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_tracevp == vp) {
				if (ktrcanset(td, p)) {
					mtx_lock(&ktrace_mtx);
					ktr_freeproc(p, &cred, NULL);
					mtx_unlock(&ktrace_mtx);
					vrele_count++;
					crfree(cred);
				} else
					error = EPERM;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (vrele_count > 0) {
			vfslocked = VFS_LOCK_GIANT(vp->v_mount);
			while (vrele_count-- > 0)
				vrele(vp);
			VFS_UNLOCK_GIANT(vfslocked);
		}
		goto done;
	}
	/*
	 * Apply the operation to the requested process(es).
	 */
	sx_slock(&proctree_lock);
	if (uap->pid < 0) {
		/*
		 * A negative pid selects a process group.
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		/*
		 * ktrops() may call vrele().  Lock pg_members
		 * by the proctree_lock rather than pg_mtx.
		 */
		PGRP_UNLOCK(pg);
		nfound = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p_cansee(td, p) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			PROC_UNLOCK(p);
			nfound++;
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, vp);
			else
				ret |= ktrops(td, p, ops, facs, vp);
		}
		if (nfound == 0) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
	} else {
		/*
		 * A non-negative pid selects a single process.
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		error = p_cansee(td, p);
		/*
		 * The slock of the proctree lock will keep this process
		 * from going away, so unlocking the proc here is ok.
		 */
		PROC_UNLOCK(p);
		if (error) {
			sx_sunlock(&proctree_lock);
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, vp);
		else
			ret |= ktrops(td, p, ops, facs, vp);
	}
	sx_sunlock(&proctree_lock);
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void)vn_close(vp, FWRITE, td->td_ucred, td);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	ktrace_exit(td);
	return (error);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}
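
/*
 * utrace(2) system call: allow a user process to append an arbitrary
 * record of up to KTR_USER_MAXLEN bytes to its own trace stream.
 */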
/* ARGSUSED */
int
utrace(struct thread *td, struct utrace_args *uap)
{

#ifdef KTRACE
	struct ktr_request *req;
	void *cp;
	int error;

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	cp = malloc(uap->len, M_KTRACE, M_WAITOK);
	error = copyin(uap->addr, cp, uap->len);
	if (error) {
		free(cp, M_KTRACE);
		return (error);
	}
	req = ktr_getrequest(KTR_USER);
	if (req == NULL) {
		free(cp, M_KTRACE);
		return (ENOMEM);
	}
	req->ktr_buffer = cp;
	req->ktr_header.ktr_len = uap->len;
	ktr_submitrequest(td, req);
	return (0);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

#ifdef KTRACE
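/*
 * Apply a single set/clear operation to one process, installing or
 * releasing the trace vnode and credential as required.  Returns 1 on
 * success and 0 if the caller lacks permission to change the target.
 */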
static int
ktrops(struct thread *td, struct proc *p, int ops, int facs,
    struct vnode *vp)
{
	struct vnode *tracevp = NULL;
	struct ucred *tracecred = NULL;

	PROC_LOCK(p);
	if (!ktrcanset(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_lock(&ktrace_mtx);
	if (ops == KTROP_SET) {
		if (p->p_tracevp != vp) {
			/*
			 * If the trace file is already in use, relinquish
			 * it below.
			 */
			tracevp = p->p_tracevp;
			VREF(vp);
			p->p_tracevp = vp;
		}
		if (p->p_tracecred != td->td_ucred) {
			tracecred = p->p_tracecred;
			p->p_tracecred = crhold(td->td_ucred);
		}
		p->p_traceflag |= facs;
		if (priv_check(td, PRIV_KTRACE) == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0)
			/* No more tracing for this process. */
			ktr_freeproc(p, &tracecred, &tracevp);
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	if (tracevp != NULL) {
		int vfslocked;

		vfslocked = VFS_LOCK_GIANT(tracevp->v_mount);
		vrele(tracevp);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (tracecred != NULL)
		crfree(tracecred);

	return (1);
}
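
/*
 * Apply ktrops() to a process and every descendant of it, walking the
 * process tree iteratively under the proctree lock.
 */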
static int
ktrsetchildren(struct thread *td, struct proc *top, int ops, int facs,
    struct vnode *vp)
{
	struct proc *p;
	int ret = 0;

	p = top;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/* NOTREACHED */
}

static void
ktr_writerequest(struct thread *td, struct ktr_request *req)
{
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	int datalen, buflen, vrele_count;
	int error, vfslocked;

	/*
	 * We hold the vnode and credential for use in I/O in case ktrace is
	 * disabled on the process as we write out the request.
	 *
	 * XXXRW: This is not ideal: we could end up performing a write after
	 * the vnode has been closed.
	 */
	mtx_lock(&ktrace_mtx);
	vp = td->td_proc->p_tracevp;
	cred = td->td_proc->p_tracecred;

	/*
	 * If vp is NULL, the vp has been cleared out from under this
	 * request, so just drop it.  Make sure the credential and vnode are
	 * in sync: we should have both or neither.
	 */
	if (vp == NULL) {
		KASSERT(cred == NULL, ("ktr_writerequest: cred != NULL"));
		mtx_unlock(&ktrace_mtx);
		return;
	}
	VREF(vp);
	KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));
	crhold(cred);
	mtx_unlock(&ktrace_mtx);

	kth = &req->ktr_header;
	KASSERT(((u_short)kth->ktr_type & ~KTR_DROP) <
	    sizeof(data_lengths) / sizeof(data_lengths[0]),
	    ("data_lengths array overflow"));
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
	buflen = kth->ktr_len;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	(void)VOP_LEASE(vp, td, cred, LEASE_WRITE);
#ifdef MAC
	error = mac_check_vnode_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	crfree(cred);
	if (!error) {
		vrele(vp);
		VFS_UNLOCK_GIANT(vfslocked);
		return;
	}
	VFS_UNLOCK_GIANT(vfslocked);

	/*
	 * If an error was encountered, give up tracing on this vnode.  We
	 * defer all the vrele()'s on the vnode until after we are finished
	 * walking the various lists to avoid needlessly holding locks.
	 * NB: at this point we still hold the vnode reference that must
	 * not go away as we need the valid vnode to compare with.  Thus let
	 * vrele_count start at 1 and the reference will be freed
	 * by the loop at the end after our last use of vp.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	vrele_count = 1;
	/*
	 * First, clear this vnode from being used by any processes in the
	 * system.
	 * XXX - If one process gets an EPERM writing to the vnode, should
	 * we really do this?  Other processes might have suitable
	 * credentials for the operation.
	 */
	cred = NULL;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_tracevp == vp) {
			mtx_lock(&ktrace_mtx);
			ktr_freeproc(p, &cred, NULL);
			mtx_unlock(&ktrace_mtx);
			vrele_count++;
		}
		PROC_UNLOCK(p);
		if (cred != NULL) {
			crfree(cred);
			cred = NULL;
		}
	}
	sx_sunlock(&allproc_lock);

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	while (vrele_count-- > 0)
		vrele(vp);
	VFS_UNLOCK_GIANT(vfslocked);
}

/*
 * Return true if the caller has permission to set the ktracing state
 * of the target.  Essentially, the target can't possess any more
 * permissions than the caller.  KTRFAC_ROOT signifies that root
 * previously set the tracing status on the target process, and so only
 * root may further change it.
 */
static int
ktrcanset(struct thread *td, struct proc *targetp)
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if (targetp->p_traceflag & KTRFAC_ROOT &&
	    priv_check(td, PRIV_KTRACE))
		return (0);

	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}

#endif /* KTRACE */