/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/ktrace.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>

#include <security/mac/mac_framework.h>

/*
 * The ktrace facility allows the tracing of certain key events in user space
 * processes, such as system calls, signal delivery, context switches, and
 * user generated events using utrace(2).  It works by streaming event
 * records and data to a vnode associated with the process using the
 * ktrace(2) system call.  In general, records can be written directly from
 * the context that generates the event.  One important exception to this is
 * during a context switch, where sleeping is not permitted.  To handle this
 * case, trace events are generated using in-kernel ktr_request records, and
 * then delivered to disk at a convenient moment -- either immediately, the
 * next traceable event, at system call return, or at process exit.
 *
 * When dealing with multiple threads or processes writing to the same event
 * log, ordering guarantees are weak: specifically, if an event has multiple
 * records (i.e., system call enter and return), they may be interlaced with
 * records from another event.  Process and thread ID information is provided
 * in the record, and user applications can de-interlace events if required.
 */

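/*
 * Illustrative userland usage (a sketch, not part of this file): enable
 * system call tracing for a process and its descendants with ktrace(2),
 * then decode the resulting file with kdump(1):
 *
 *	if (ktrace("ktrace.out", KTROP_SET | KTRFLAG_DESCEND,
 *	    KTRFAC_SYSCALL | KTRFAC_SYSRET, pid) != 0)
 *		err(1, "ktrace");
 *
 * Tracing is later disabled with KTROP_CLEAR for the same trace points, or
 * for all processes writing to the file with KTROP_CLEARFILE.
 */
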
static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE

FEATURE(ktrace, "Kernel support for system-call tracing");

#ifndef KTRACE_REQUEST_POOL
#define	KTRACE_REQUEST_POOL	100
#endif

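/*
 * Each trace event is staged in a ktr_request drawn from a global free
 * pool.  A request carries the generic record header, an optional
 * variable-length payload in ktr_buffer, and a type-specific fixed-size
 * payload in the ktr_data union.
 */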
struct ktr_request {
	struct	ktr_header ktr_header;
	void	*ktr_buffer;
	union {
		struct	ktr_proc_ctor ktr_proc_ctor;
		struct	ktr_cap_fail ktr_cap_fail;
		struct	ktr_syscall ktr_syscall;
		struct	ktr_sysret ktr_sysret;
		struct	ktr_genio ktr_genio;
		struct	ktr_psig ktr_psig;
		struct	ktr_csw ktr_csw;
		struct	ktr_fault ktr_fault;
		struct	ktr_faultend ktr_faultend;
		struct	ktr_struct_array ktr_struct_array;
	} ktr_data;
	STAILQ_ENTRY(ktr_request) ktr_list;
};

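/*
 * Fixed-size payload length written for each record type; variable-length
 * data is accounted for separately via ktr_buffer and ktr_header.ktr_len.
 */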
static int data_lengths[] = {
	[KTR_SYSCALL] = offsetof(struct ktr_syscall, ktr_args),
	[KTR_SYSRET] = sizeof(struct ktr_sysret),
	[KTR_NAMEI] = 0,
	[KTR_GENIO] = sizeof(struct ktr_genio),
	[KTR_PSIG] = sizeof(struct ktr_psig),
	[KTR_CSW] = sizeof(struct ktr_csw),
	[KTR_USER] = 0,
	[KTR_STRUCT] = 0,
	[KTR_SYSCTL] = 0,
	[KTR_PROCCTOR] = sizeof(struct ktr_proc_ctor),
	[KTR_PROCDTOR] = 0,
	[KTR_CAPFAIL] = sizeof(struct ktr_cap_fail),
	[KTR_FAULT] = sizeof(struct ktr_fault),
	[KTR_FAULTEND] = sizeof(struct ktr_faultend),
	[KTR_STRUCT_ARRAY] = sizeof(struct ktr_struct_array),
};

static STAILQ_HEAD(, ktr_request) ktr_free;

static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options");

static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);

u_int ktr_geniosize = PAGE_SIZE;
SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RWTUN, &ktr_geniosize,
    0, "Maximum size of genio event payload");

static int print_message = 1;
static struct mtx ktrace_mtx;
static struct sx ktrace_sx;

static void ktrace_init(void *dummy);
static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
static u_int ktrace_resize_pool(u_int oldsize, u_int newsize);
static struct ktr_request *ktr_getrequest_entered(struct thread *td, int type);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
static void ktr_freeproc(struct proc *p, struct ucred **uc,
    struct vnode **vp);
static void ktr_freerequest(struct ktr_request *req);
static void ktr_freerequest_locked(struct ktr_request *req);
static void ktr_writerequest(struct thread *td, struct ktr_request *req);
static int ktrcanset(struct thread *, struct proc *);
static int ktrsetchildren(struct thread *, struct proc *, int, int,
    struct vnode *);
static int ktrops(struct thread *, struct proc *, int, int, struct vnode *);
static void ktrprocctor_entered(struct thread *, struct proc *);

/*
 * ktrace itself generates events, such as context switches, which we do not
 * wish to trace.  Maintain a flag, TDP_INKTRACE, on each thread to determine
 * whether or not it is in a region where tracing of events should be
 * suppressed.
 */
static void
ktrace_enter(struct thread *td)
{

	KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set"));
	td->td_pflags |= TDP_INKTRACE;
}

static void
ktrace_exit(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set"));
	td->td_pflags &= ~TDP_INKTRACE;
}

static void
ktrace_assert(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
}

static void
ktrace_init(void *dummy)
{
	struct ktr_request *req;
	int i;

	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
	sx_init(&ktrace_sx, "ktrace_sx");
	STAILQ_INIT(&ktr_free);
	for (i = 0; i < ktr_requestpool; i++) {
		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK);
		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	}
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);

static int
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
{
	struct thread *td;
	u_int newsize, oldsize, wantsize;
	int error;

	/* Handle easy read-only case first to avoid warnings from GCC. */
	if (!req->newptr) {
		oldsize = ktr_requestpool;
		return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
	}

	error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
	if (error)
		return (error);
	td = curthread;
	ktrace_enter(td);
	oldsize = ktr_requestpool;
	newsize = ktrace_resize_pool(oldsize, wantsize);
	ktrace_exit(td);
	error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
	if (error)
		return (error);
	if (wantsize > oldsize && newsize < wantsize)
		return (ENOSPC);
	return (0);
}
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool, CTLTYPE_UINT|CTLFLAG_RW,
    &ktr_requestpool, 0, sysctl_kern_ktrace_request_pool, "IU",
    "Pool buffer size for ktrace(1)");

static u_int
ktrace_resize_pool(u_int oldsize, u_int newsize)
{
	STAILQ_HEAD(, ktr_request) ktr_new;
	struct ktr_request *req;
	int bound;

	print_message = 1;
	bound = newsize - oldsize;
	if (bound == 0)
		return (ktr_requestpool);
	if (bound < 0) {
		mtx_lock(&ktrace_mtx);
		/* Shrink pool down to newsize if possible. */
		while (bound++ < 0) {
			req = STAILQ_FIRST(&ktr_free);
			if (req == NULL)
				break;
			STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
			ktr_requestpool--;
			free(req, M_KTRACE);
		}
	} else {
		/* Grow pool up to newsize. */
		STAILQ_INIT(&ktr_new);
		while (bound-- > 0) {
			req = malloc(sizeof(struct ktr_request), M_KTRACE,
			    M_WAITOK);
			STAILQ_INSERT_HEAD(&ktr_new, req, ktr_list);
		}
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&ktr_free, &ktr_new);
		ktr_requestpool += (newsize - oldsize);
	}
	mtx_unlock(&ktrace_mtx);
	return (ktr_requestpool);
}

/* ktr_getrequest() assumes that ktr_comm[] is the same size as td_name[]. */
CTASSERT(sizeof(((struct ktr_header *)NULL)->ktr_comm) ==
    sizeof(((struct thread *)NULL)->td_name));

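/*
 * Allocate a request from the free pool and fill in the common header
 * fields.  If the pool is exhausted, mark the process with KTRFAC_DROP so
 * that the next record it does manage to allocate is flagged KTR_DROP,
 * telling userspace that records were lost.  Returns NULL if this event
 * type is not being traced or no request was available.
 */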
static struct ktr_request *
ktr_getrequest_entered(struct thread *td, int type)
{
	struct ktr_request *req;
	struct proc *p = td->td_proc;
	int pm;

	mtx_lock(&ktrace_mtx);
	if (!KTRCHECK(td, type)) {
		mtx_unlock(&ktrace_mtx);
		return (NULL);
	}
	req = STAILQ_FIRST(&ktr_free);
	if (req != NULL) {
		STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
		req->ktr_header.ktr_type = type;
		if (p->p_traceflag & KTRFAC_DROP) {
			req->ktr_header.ktr_type |= KTR_DROP;
			p->p_traceflag &= ~KTRFAC_DROP;
		}
		mtx_unlock(&ktrace_mtx);
		microtime(&req->ktr_header.ktr_time);
		req->ktr_header.ktr_pid = p->p_pid;
		req->ktr_header.ktr_tid = td->td_tid;
		bcopy(td->td_name, req->ktr_header.ktr_comm,
		    sizeof(req->ktr_header.ktr_comm));
		req->ktr_buffer = NULL;
		req->ktr_header.ktr_len = 0;
	} else {
		p->p_traceflag |= KTRFAC_DROP;
		pm = print_message;
		print_message = 0;
		mtx_unlock(&ktrace_mtx);
		if (pm)
			printf("Out of ktrace request objects.\n");
	}
	return (req);
}

static struct ktr_request *
ktr_getrequest(int type)
{
	struct thread *td = curthread;
	struct ktr_request *req;

	ktrace_enter(td);
	req = ktr_getrequest_entered(td, type);
	if (req == NULL)
		ktrace_exit(td);

	return (req);
}

/*
 * Some trace generation environments don't permit direct access to VFS,
 * such as during a context switch where sleeping is not allowed.  Under
 * these circumstances, queue a request to the thread to be written
 * asynchronously later.
 */
static void
ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
}

/*
 * Drain any pending ktrace records from the per-process queue to disk.
 * This is used both internally before committing other records and on
 * system call return.  We drain all the records found at the time the
 * drain is requested, but do not keep draining afterwards, as those
 * events may be approximately "after" the current event.
 */
static void
ktr_drain(struct thread *td)
{
	struct ktr_request *queued_req;
	STAILQ_HEAD(, ktr_request) local_queue;

	ktrace_assert(td);
	sx_assert(&ktrace_sx, SX_XLOCKED);

	STAILQ_INIT(&local_queue);

	if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
		mtx_unlock(&ktrace_mtx);

		while ((queued_req = STAILQ_FIRST(&local_queue))) {
			STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
			ktr_writerequest(td, queued_req);
			ktr_freerequest(queued_req);
		}
	}
}

/*
 * Submit a trace record for immediate commit to disk -- to be used only
 * where entering VFS is OK.  First drain any pending records that may have
 * been cached in the thread.
 */
static void
ktr_submitrequest(struct thread *td, struct ktr_request *req)
{

	ktrace_assert(td);

	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	ktr_writerequest(td, req);
	ktr_freerequest(req);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

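/*
 * Return a request to the free pool, releasing any variable-length buffer
 * attached to it.
 */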
static void
ktr_freerequest(struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	ktr_freerequest_locked(req);
	mtx_unlock(&ktrace_mtx);
}

static void
ktr_freerequest_locked(struct ktr_request *req)
{

	mtx_assert(&ktrace_mtx, MA_OWNED);
	if (req->ktr_buffer != NULL)
		free(req->ktr_buffer, M_KTRACE);
	STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
}

/*
 * Disable tracing for a process and release all associated resources.
 * The caller is responsible for releasing a reference on the returned
 * vnode and credentials.
 */
static void
ktr_freeproc(struct proc *p, struct ucred **uc, struct vnode **vp)
{
	struct ktr_request *req;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&ktrace_mtx, MA_OWNED);
	*uc = p->p_tracecred;
	p->p_tracecred = NULL;
	if (vp != NULL)
		*vp = p->p_tracevp;
	p->p_tracevp = NULL;
	p->p_traceflag = 0;
	while ((req = STAILQ_FIRST(&p->p_ktr)) != NULL) {
		STAILQ_REMOVE_HEAD(&p->p_ktr, ktr_list);
		ktr_freerequest_locked(req);
	}
}

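/*
 * Record a system call entry: the system call number, the argument count,
 * and a copy of the arguments themselves.
 */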
void
ktrsyscall(int code, int narg, register_t args[])
{
	struct ktr_request *req;
	struct ktr_syscall *ktp;
	size_t buflen;
	char *buf = NULL;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	buflen = sizeof(register_t) * narg;
	if (buflen > 0) {
		buf = malloc(buflen, M_KTRACE, M_WAITOK);
		bcopy(args, buf, buflen);
	}
	req = ktr_getrequest(KTR_SYSCALL);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	ktp = &req->ktr_data.ktr_syscall;
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	if (buflen > 0) {
		req->ktr_header.ktr_len = buflen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

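/*
 * Record a system call return: the system call number, the error status,
 * and the first return value when no error occurred.
 */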
void
ktrsysret(int code, int error, register_t retval)
{
	struct ktr_request *req;
	struct ktr_sysret *ktp;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_SYSRET);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_sysret;
	ktp->ktr_code = code;
	ktp->ktr_error = error;
	ktp->ktr_retval = ((error == 0) ? retval : 0);	/* what about val2 ? */
	ktr_submitrequest(curthread, req);
}

/*
 * When a setuid process execs, disable tracing.
 *
 * XXX: We toss any pending asynchronous records.
 */
void
ktrprocexec(struct proc *p, struct ucred **uc, struct vnode **vp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_lock(&ktrace_mtx);
	ktr_freeproc(p, uc, vp);
	mtx_unlock(&ktrace_mtx);
}

/*
 * When a process exits, drain per-process asynchronous trace records
 * and disable tracing.
 */
void
ktrprocexit(struct thread *td)
{
	struct ktr_request *req;
	struct proc *p;
	struct ucred *cred;
	struct vnode *vp;

	p = td->td_proc;
	if (p->p_traceflag == 0)
		return;

	ktrace_enter(td);
	req = ktr_getrequest_entered(td, KTR_PROCDTOR);
	if (req != NULL)
		ktr_enqueuerequest(td, req);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	PROC_LOCK(p);
	mtx_lock(&ktrace_mtx);
	ktr_freeproc(p, &cred, &vp);
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	if (vp != NULL)
		vrele(vp);
	if (cred != NULL)
		crfree(cred);
	ktrace_exit(td);
}

static void
ktrprocctor_entered(struct thread *td, struct proc *p)
{
	struct ktr_proc_ctor *ktp;
	struct ktr_request *req;
	struct thread *td2;

	ktrace_assert(td);
	td2 = FIRST_THREAD_IN_PROC(p);
	req = ktr_getrequest_entered(td2, KTR_PROCCTOR);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_proc_ctor;
	ktp->sv_flags = p->p_sysent->sv_flags;
	ktr_enqueuerequest(td2, req);
}

void
ktrprocctor(struct proc *p)
{
	struct thread *td = curthread;

	if ((p->p_traceflag & KTRFAC_MASK) == 0)
		return;

	ktrace_enter(td);
	ktrprocctor_entered(td, p);
	ktrace_exit(td);
}

/*
 * When a process forks, enable tracing in the new process if needed.
 */
void
ktrprocfork(struct proc *p1, struct proc *p2)
{

	MPASS(p2->p_tracevp == NULL);
	MPASS(p2->p_traceflag == 0);

	if (p1->p_traceflag == 0)
		return;

	PROC_LOCK(p1);
	mtx_lock(&ktrace_mtx);
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracevp = p1->p_tracevp) != NULL) {
			VREF(p2->p_tracevp);
			KASSERT(p1->p_tracecred != NULL,
			    ("ktrace vnode with no cred"));
			p2->p_tracecred = crhold(p1->p_tracecred);
		}
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p1);

	ktrprocctor(p2);
}

/*
 * When a thread returns, drain any asynchronous records generated by the
 * system call.
 */
void
ktruserret(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

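/*
 * Record a pathname translated by namei(9); the path itself is the
 * record's entire payload.
 */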
void
ktrnamei(char *path)
{
	struct ktr_request *req;
	int namelen;
	char *buf = NULL;

	namelen = strlen(path);
	if (namelen > 0) {
		buf = malloc(namelen, M_KTRACE, M_WAITOK);
		bcopy(path, buf, namelen);
	}
	req = ktr_getrequest(KTR_NAMEI);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	if (namelen > 0) {
		req->ktr_header.ktr_len = namelen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

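/*
 * Record a sysctl(3) access.  The numeric MIB is resolved to its string
 * name by querying the kernel's internal 0.1 (sysctl name lookup) node,
 * and that name forms the record's payload.
 */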
void
ktrsysctl(int *name, u_int namelen)
{
	struct ktr_request *req;
	u_int mib[CTL_MAXNAME + 2];
	char *mibname;
	size_t mibnamelen;
	int error;

	/* Lookup name of mib. */
	KASSERT(namelen <= CTL_MAXNAME, ("sysctl MIB too long"));
	mib[0] = 0;
	mib[1] = 1;
	bcopy(name, mib + 2, namelen * sizeof(*name));
	mibnamelen = 128;
	mibname = malloc(mibnamelen, M_KTRACE, M_WAITOK);
	error = kernel_sysctl(curthread, mib, namelen + 2, mibname, &mibnamelen,
	    NULL, 0, &mibnamelen, 0);
	if (error) {
		free(mibname, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_SYSCTL);
	if (req == NULL) {
		free(mibname, M_KTRACE);
		return;
	}
	req->ktr_header.ktr_len = mibnamelen;
	req->ktr_buffer = mibname;
	ktr_submitrequest(curthread, req);
}

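/*
 * Record the data transferred by a general I/O operation.  The supplied
 * uio describes a copy of the transferred data; it is consumed and freed
 * here, and the logged payload is capped at ktr_geniosize bytes.
 */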
void
ktrgenio(int fd, enum uio_rw rw, struct uio *uio, int error)
{
	struct ktr_request *req;
	struct ktr_genio *ktg;
	int datalen;
	char *buf;

	if (error) {
		free(uio, M_IOV);
		return;
	}
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;
	datalen = MIN(uio->uio_resid, ktr_geniosize);
	buf = malloc(datalen, M_KTRACE, M_WAITOK);
	error = uiomove(buf, datalen, uio);
	free(uio, M_IOV);
	if (error) {
		free(buf, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_GENIO);
	if (req == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ktg = &req->ktr_data.ktr_genio;
	ktg->ktr_fd = fd;
	ktg->ktr_rw = rw;
	req->ktr_header.ktr_len = datalen;
	req->ktr_buffer = buf;
	ktr_submitrequest(curthread, req);
}

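/*
 * Record signal delivery: signal number, handler, blocked mask, and code.
 * The record is queued on the process rather than written immediately,
 * presumably because signal delivery contexts may not tolerate blocking
 * on VFS.
 */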
void
ktrpsig(int sig, sig_t action, sigset_t *mask, int code)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_psig *kp;

	req = ktr_getrequest(KTR_PSIG);
	if (req == NULL)
		return;
	kp = &req->ktr_data.ktr_psig;
	kp->signo = (char)sig;
	kp->action = action;
	kp->mask = *mask;
	kp->code = code;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

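/*
 * Record a context switch.  Sleeping is not permitted here, so the record
 * is queued on the process and flushed to disk at the next opportunity
 * (see ktr_drain()).
 */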
void
ktrcsw(int out, int user, const char *wmesg)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_csw *kc;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_CSW);
	if (req == NULL)
		return;
	kc = &req->ktr_data.ktr_csw;
	kc->out = out;
	kc->user = user;
	if (wmesg != NULL)
		strlcpy(kc->wmesg, wmesg, sizeof(kc->wmesg));
	else
		bzero(kc->wmesg, sizeof(kc->wmesg));
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrstruct(const char *name, const void *data, size_t datalen)
{
	struct ktr_request *req;
	char *buf;
	size_t buflen, namelen;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	if (data == NULL)
		datalen = 0;
	namelen = strlen(name) + 1;
	buflen = namelen + datalen;
	buf = malloc(buflen, M_KTRACE, M_WAITOK);
	strcpy(buf, name);
	bcopy(data, buf + namelen, datalen);
	if ((req = ktr_getrequest(KTR_STRUCT)) == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	req->ktr_buffer = buf;
	req->ktr_header.ktr_len = buflen;
	ktr_submitrequest(curthread, req);
}

void
ktrstructarray(const char *name, enum uio_seg seg, const void *data,
    int num_items, size_t struct_size)
{
	struct ktr_request *req;
	struct ktr_struct_array *ksa;
	char *buf;
	size_t buflen, datalen, namelen;
	int max_items;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	/* Trim array length to genio size. */
	max_items = ktr_geniosize / struct_size;
	if (num_items > max_items) {
		if (max_items == 0)
			num_items = 1;
		else
			num_items = max_items;
	}
	datalen = num_items * struct_size;

	if (data == NULL)
		datalen = 0;

	namelen = strlen(name) + 1;
	buflen = namelen + datalen;
	buf = malloc(buflen, M_KTRACE, M_WAITOK);
	strcpy(buf, name);
	if (seg == UIO_SYSSPACE)
		bcopy(data, buf + namelen, datalen);
	else {
		if (copyin(data, buf + namelen, datalen) != 0) {
			free(buf, M_KTRACE);
			return;
		}
	}
	if ((req = ktr_getrequest(KTR_STRUCT_ARRAY)) == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ksa = &req->ktr_data.ktr_struct_array;
	ksa->struct_size = struct_size;
	req->ktr_buffer = buf;
	req->ktr_header.ktr_len = buflen;
	ktr_submitrequest(curthread, req);
}

void
ktrcapfail(enum ktr_cap_fail_type type, const cap_rights_t *needed,
    const cap_rights_t *held)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_cap_fail *kcf;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_CAPFAIL);
	if (req == NULL)
		return;
	kcf = &req->ktr_data.ktr_cap_fail;
	kcf->cap_type = type;
	if (needed != NULL)
		kcf->cap_needed = *needed;
	else
		cap_rights_init(&kcf->cap_needed);
	if (held != NULL)
		kcf->cap_held = *held;
	else
		cap_rights_init(&kcf->cap_held);
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrfault(vm_offset_t vaddr, int type)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_fault *kf;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_FAULT);
	if (req == NULL)
		return;
	kf = &req->ktr_data.ktr_fault;
	kf->vaddr = vaddr;
	kf->type = type;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrfaultend(int result)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_faultend *kf;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_FAULTEND);
	if (req == NULL)
		return;
	kf = &req->ktr_data.ktr_faultend;
	kf->result = result;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}
#endif /* KTRACE */

/* Interface and common routines */

#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
#endif
/* ARGSUSED */
int
sys_ktrace(struct thread *td, struct ktrace_args *uap)
{
#ifdef KTRACE
	struct vnode *vp = NULL;
	struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int nfound, ret = 0;
	int flags, error = 0;
	struct nameidata nd;
	struct ucred *cred;

	/*
	 * Need something to (un)trace.
	 */
	if (ops != KTROP_CLEARFILE && facs == 0)
		return (EINVAL);

	ktrace_enter(td);
	if (ops != KTROP_CLEAR) {
		/*
		 * An operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, uap->fname, td);
		flags = FREAD | FWRITE | O_NOFOLLOW;
		error = vn_open(&nd, &flags, 0, NULL);
		if (error) {
			ktrace_exit(td);
			return (error);
		}
		NDFREE(&nd, NDF_ONLY_PNBUF);
		vp = nd.ni_vp;
		VOP_UNLOCK(vp, 0);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
			ktrace_exit(td);
			return (EACCES);
		}
	}
	/*
	 * Clear all uses of the tracefile.
	 */
	if (ops == KTROP_CLEARFILE) {
		int vrele_count;

		vrele_count = 0;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_tracevp == vp) {
				if (ktrcanset(td, p)) {
					mtx_lock(&ktrace_mtx);
					ktr_freeproc(p, &cred, NULL);
					mtx_unlock(&ktrace_mtx);
					vrele_count++;
					crfree(cred);
				} else
					error = EPERM;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (vrele_count > 0) {
			while (vrele_count-- > 0)
				vrele(vp);
		}
		goto done;
	}
	/*
	 * Do it.
	 */
	sx_slock(&proctree_lock);
	if (uap->pid < 0) {
		/*
		 * By process group.
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		/*
		 * ktrops() may call vrele().  Lock pg_members
		 * by the proctree_lock rather than pg_mtx.
		 */
		PGRP_UNLOCK(pg);
		nfound = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NEW ||
			    p_cansee(td, p) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			nfound++;
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, vp);
			else
				ret |= ktrops(td, p, ops, facs, vp);
		}
		if (nfound == 0) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
	} else {
		/*
		 * By pid.
		 */
		p = pfind(uap->pid);
		if (p == NULL)
			error = ESRCH;
		else
			error = p_cansee(td, p);
		if (error) {
			if (p != NULL)
				PROC_UNLOCK(p);
			sx_sunlock(&proctree_lock);
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, vp);
		else
			ret |= ktrops(td, p, ops, facs, vp);
	}
	sx_sunlock(&proctree_lock);
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL)
		(void) vn_close(vp, FWRITE, td->td_ucred, td);
	ktrace_exit(td);
	return (error);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

/* ARGSUSED */
int
sys_utrace(struct thread *td, struct utrace_args *uap)
{

#ifdef KTRACE
	struct ktr_request *req;
	void *cp;
	int error;

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	cp = malloc(uap->len, M_KTRACE, M_WAITOK);
	error = copyin(uap->addr, cp, uap->len);
	if (error) {
		free(cp, M_KTRACE);
		return (error);
	}
	req = ktr_getrequest(KTR_USER);
	if (req == NULL) {
		free(cp, M_KTRACE);
		return (ENOMEM);
	}
	req->ktr_buffer = cp;
	req->ktr_header.ktr_len = uap->len;
	ktr_submitrequest(td, req);
	return (0);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

#ifdef KTRACE
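/*
 * Apply a single KTROP_SET or KTROP_CLEAR operation to one process,
 * swapping its trace vnode and credential as needed and updating its
 * facility mask.  The process must be locked on entry and is unlocked on
 * return.  Returns non-zero if the caller was permitted to modify the
 * process.
 */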
static int
ktrops(struct thread *td, struct proc *p, int ops, int facs, struct vnode *vp)
{
	struct vnode *tracevp = NULL;
	struct ucred *tracecred = NULL;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!ktrcanset(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	if (p->p_flag & P_WEXIT) {
		/* If the process is exiting, just ignore it. */
		PROC_UNLOCK(p);
		return (1);
	}
	mtx_lock(&ktrace_mtx);
	if (ops == KTROP_SET) {
		if (p->p_tracevp != vp) {
			/*
			 * If trace file already in use, relinquish below.
			 */
			tracevp = p->p_tracevp;
			VREF(vp);
			p->p_tracevp = vp;
		}
		if (p->p_tracecred != td->td_ucred) {
			tracecred = p->p_tracecred;
			p->p_tracecred = crhold(td->td_ucred);
		}
		p->p_traceflag |= facs;
		if (priv_check(td, PRIV_KTRACE) == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0)
			/* no more tracing */
			ktr_freeproc(p, &tracecred, &tracevp);
	}
	mtx_unlock(&ktrace_mtx);
	if ((p->p_traceflag & KTRFAC_MASK) != 0)
		ktrprocctor_entered(td, p);
	PROC_UNLOCK(p);
	if (tracevp != NULL)
		vrele(tracevp);
	if (tracecred != NULL)
		crfree(tracecred);

	return (1);
}

static int
ktrsetchildren(struct thread *td, struct proc *top, int ops, int facs,
    struct vnode *vp)
{
	struct proc *p;
	int ret = 0;

	p = top;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
		PROC_LOCK(p);
	}
	/*NOTREACHED*/
}

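/*
 * Write a single request to the trace vnode using up to three iovecs: the
 * generic header, the type-specific fixed-size payload, and any
 * variable-length buffer.  On a write error, tracing is disabled for all
 * processes using this vnode.
 */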
static void
ktr_writerequest(struct thread *td, struct ktr_request *req)
{
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	int datalen, buflen, vrele_count;
	int error;

	/*
	 * We hold the vnode and credential for use in I/O in case ktrace is
	 * disabled on the process as we write out the request.
	 *
	 * XXXRW: This is not ideal: we could end up performing a write after
	 * the vnode has been closed.
	 */
	mtx_lock(&ktrace_mtx);
	vp = td->td_proc->p_tracevp;
	cred = td->td_proc->p_tracecred;

	/*
	 * If vp is NULL, the vp has been cleared out from under this
	 * request, so just drop it.  Make sure the credential and vnode are
	 * in sync: we should have both or neither.
	 */
	if (vp == NULL) {
		KASSERT(cred == NULL, ("ktr_writerequest: cred != NULL"));
		mtx_unlock(&ktrace_mtx);
		return;
	}
	VREF(vp);
	KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));
	crhold(cred);
	mtx_unlock(&ktrace_mtx);

	kth = &req->ktr_header;
	KASSERT(((u_short)kth->ktr_type & ~KTR_DROP) < nitems(data_lengths),
	    ("data_lengths array overflow"));
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
	buflen = kth->ktr_len;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}

	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef MAC
	error = mac_vnode_check_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
	crfree(cred);
	if (!error) {
		vrele(vp);
		return;
	}

	/*
	 * If error encountered, give up tracing on this vnode.  We defer
	 * all the vrele()'s on the vnode until after we are finished walking
	 * the various lists to avoid needlessly holding locks.
	 * NB: at this point we still hold the vnode reference that must
	 * not go away as we need the valid vnode to compare with.  Thus let
	 * vrele_count start at 1 and the reference will be freed
	 * by the loop at the end after our last use of vp.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	vrele_count = 1;
	/*
	 * First, clear this vnode from being used by any processes in the
	 * system.
	 * XXX - If one process gets an EPERM writing to the vnode, should
	 * we really do this?  Other processes might have suitable
	 * credentials for the operation.
	 */
	cred = NULL;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_tracevp == vp) {
			mtx_lock(&ktrace_mtx);
			ktr_freeproc(p, &cred, NULL);
			mtx_unlock(&ktrace_mtx);
			vrele_count++;
		}
		PROC_UNLOCK(p);
		if (cred != NULL) {
			crfree(cred);
			cred = NULL;
		}
	}
	sx_sunlock(&allproc_lock);

	while (vrele_count-- > 0)
		vrele(vp);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(struct thread *td, struct proc *targetp)
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if (targetp->p_traceflag & KTRFAC_ROOT &&
	    priv_check(td, PRIV_KTRACE))
		return (0);

	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}

#endif /* KTRACE */