/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/ktrace.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>

#include <security/mac/mac_framework.h>
/*
 * The ktrace facility allows the tracing of certain key events in user space
 * processes, such as system calls, signal delivery, context switches, and
 * user-generated events using utrace(2).  It works by streaming event
 * records and data to a vnode associated with the process using the
 * ktrace(2) system call.  In general, records can be written directly from
 * the context that generates the event.  One important exception to this is
 * during a context switch, where sleeping is not permitted.  To handle this
 * case, trace events are generated using in-kernel ktr_request records, and
 * then delivered to disk at a convenient moment -- either immediately, at
 * the next traceable event, at system call return, or at process exit.
 *
 * When dealing with multiple threads or processes writing to the same event
 * log, ordering guarantees are weak: specifically, if an event has multiple
 * records (e.g., system call enter and return), they may be interlaced with
 * records from another event.  Process and thread ID information is provided
 * in the record, and user applications can de-interlace events if required.
 */

static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE

FEATURE(ktrace, "Kernel support for system-call tracing");

#ifndef KTRACE_REQUEST_POOL
#define	KTRACE_REQUEST_POOL	100
#endif

struct ktr_request {
	struct ktr_header ktr_header;
	void *ktr_buffer;
	union {
		struct ktr_proc_ctor ktr_proc_ctor;
		struct ktr_cap_fail ktr_cap_fail;
		struct ktr_syscall ktr_syscall;
		struct ktr_sysret ktr_sysret;
		struct ktr_genio ktr_genio;
		struct ktr_psig ktr_psig;
		struct ktr_csw ktr_csw;
		struct ktr_fault ktr_fault;
		struct ktr_faultend ktr_faultend;
		struct ktr_struct_array ktr_struct_array;
	} ktr_data;
	STAILQ_ENTRY(ktr_request) ktr_list;
};

static int data_lengths[] = {
	[KTR_SYSCALL] = offsetof(struct ktr_syscall, ktr_args),
	[KTR_SYSRET] = sizeof(struct ktr_sysret),
	[KTR_NAMEI] = 0,
	[KTR_GENIO] = sizeof(struct ktr_genio),
	[KTR_PSIG] = sizeof(struct ktr_psig),
	[KTR_CSW] = sizeof(struct ktr_csw),
	[KTR_USER] = 0,
	[KTR_STRUCT] = 0,
	[KTR_SYSCTL] = 0,
	[KTR_PROCCTOR] = sizeof(struct ktr_proc_ctor),
	[KTR_PROCDTOR] = 0,
	[KTR_CAPFAIL] = sizeof(struct ktr_cap_fail),
	[KTR_FAULT] = sizeof(struct ktr_fault),
	[KTR_FAULTEND] = sizeof(struct ktr_faultend),
	[KTR_STRUCT_ARRAY] = sizeof(struct ktr_struct_array),
};

static STAILQ_HEAD(, ktr_request) ktr_free;

static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "KTRACE options");

static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);

u_int ktr_geniosize = PAGE_SIZE;
SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RWTUN, &ktr_geniosize,
    0, "Maximum size of genio event payload");

/*
 * Control whether SIGXFSZ is sent to the traced process, in whose
 * context the ktr record is written, when the trace file size limit is
 * exceeded.  The limit is taken from the process that set up ktrace,
 * so killing the traced process for exceeding it is not completely
 * fair.
 */
int ktr_filesize_limit_signal = 0;
SYSCTL_INT(_kern_ktrace, OID_AUTO, filesize_limit_signal, CTLFLAG_RWTUN,
    &ktr_filesize_limit_signal, 0,
    "Send SIGXFSZ to the traced process when the log size limit is exceeded");

static int print_message = 1;
static struct mtx ktrace_mtx;
static struct sx ktrace_sx;

struct ktr_io_params {
	struct vnode	*vp;
	struct ucred	*cr;
	off_t		lim;
	u_int		refs;
};

static void ktrace_init(void *dummy);
static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
static u_int ktrace_resize_pool(u_int oldsize, u_int newsize);
static struct ktr_request *ktr_getrequest_entered(struct thread *td, int type);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
static struct ktr_io_params *ktr_freeproc(struct proc *p);
static void ktr_freerequest(struct ktr_request *req);
static void ktr_freerequest_locked(struct ktr_request *req);
static void ktr_writerequest(struct thread *td, struct ktr_request *req);
static int ktrcanset(struct thread *, struct proc *);
static int ktrsetchildren(struct thread *, struct proc *, int, int,
    struct ktr_io_params *);
static int ktrops(struct thread *, struct proc *, int, int,
    struct ktr_io_params *);
static void ktrprocctor_entered(struct thread *, struct proc *);

/*
 * ktrace itself generates events, such as context switches, which we do not
 * wish to trace.  Maintain a flag, TDP_INKTRACE, on each thread to determine
 * whether or not it is in a region where tracing of events should be
 * suppressed.
 */
static void
ktrace_enter(struct thread *td)
{

	KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set"));
	td->td_pflags |= TDP_INKTRACE;
}

static void
ktrace_exit(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set"));
	td->td_pflags &= ~TDP_INKTRACE;
}

static void
ktrace_assert(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
}

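/*
 * Perform boot-time initialization: set up the locks and pre-allocate
 * the initial pool of ktr_request structures.
 */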
static void
ktrace_init(void *dummy)
{
	struct ktr_request *req;
	int i;

	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
	sx_init(&ktrace_sx, "ktrace_sx");
	STAILQ_INIT(&ktr_free);
	for (i = 0; i < ktr_requestpool; i++) {
		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK |
		    M_ZERO);
		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	}
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);

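/*
 * Sysctl handler for kern.ktrace.request_pool: report the current pool
 * size on read, or resize the pool on write, returning ENOSPC if the
 * pool could not be grown all the way to the requested size.
 */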
static int
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
{
	struct thread *td;
	u_int newsize, oldsize, wantsize;
	int error;

	/* Handle easy read-only case first to avoid warnings from GCC. */
	if (!req->newptr) {
		oldsize = ktr_requestpool;
		return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
	}

	error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
	if (error)
		return (error);
	td = curthread;
	ktrace_enter(td);
	oldsize = ktr_requestpool;
	newsize = ktrace_resize_pool(oldsize, wantsize);
	ktrace_exit(td);
	error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
	if (error)
		return (error);
	if (wantsize > oldsize && newsize < wantsize)
		return (ENOSPC);
	return (0);
}
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &ktr_requestpool, 0,
    sysctl_kern_ktrace_request_pool, "IU",
    "Pool buffer size for ktrace(1)");

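/*
 * Resize the request pool toward newsize.  New entries are allocated
 * outside of ktrace_mtx and spliced onto the free list afterwards;
 * shrinking stops early if the free list runs dry.  Returns the
 * resulting pool size.
 */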
static u_int
ktrace_resize_pool(u_int oldsize, u_int newsize)
{
	STAILQ_HEAD(, ktr_request) ktr_new;
	struct ktr_request *req;
	int bound;

	print_message = 1;
	bound = newsize - oldsize;
	if (bound == 0)
		return (ktr_requestpool);
	if (bound < 0) {
		mtx_lock(&ktrace_mtx);
		/* Shrink pool down to newsize if possible. */
		while (bound++ < 0) {
			req = STAILQ_FIRST(&ktr_free);
			if (req == NULL)
				break;
			STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
			ktr_requestpool--;
			free(req, M_KTRACE);
		}
	} else {
		/* Grow pool up to newsize. */
		STAILQ_INIT(&ktr_new);
		while (bound-- > 0) {
			req = malloc(sizeof(struct ktr_request), M_KTRACE,
			    M_WAITOK | M_ZERO);
			STAILQ_INSERT_HEAD(&ktr_new, req, ktr_list);
		}
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&ktr_free, &ktr_new);
		ktr_requestpool += (newsize - oldsize);
	}
	mtx_unlock(&ktrace_mtx);
	return (ktr_requestpool);
}

/* ktr_getrequest() assumes that ktr_comm[] is the same size as td_name[]. */
CTASSERT(sizeof(((struct ktr_header *)NULL)->ktr_comm) ==
    (sizeof((struct thread *)NULL)->td_name));

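/*
 * Allocate a request from the free pool and pre-fill the common header
 * fields.  Returns NULL if this event type is not being traced or if
 * the pool is empty; in the latter case the process is marked so that
 * the next record written carries the KTR_DROP flag.  The caller must
 * already be inside a ktrace_enter() section.
 */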
static struct ktr_request *
ktr_getrequest_entered(struct thread *td, int type)
{
	struct ktr_request *req;
	struct proc *p = td->td_proc;
	int pm;

	mtx_lock(&ktrace_mtx);
	if (!KTRCHECK(td, type)) {
		mtx_unlock(&ktrace_mtx);
		return (NULL);
	}
	req = STAILQ_FIRST(&ktr_free);
	if (req != NULL) {
		STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
		req->ktr_header.ktr_type = type;
		if (p->p_traceflag & KTRFAC_DROP) {
			req->ktr_header.ktr_type |= KTR_DROP;
			p->p_traceflag &= ~KTRFAC_DROP;
		}
		mtx_unlock(&ktrace_mtx);
		microtime(&req->ktr_header.ktr_time);
		req->ktr_header.ktr_pid = p->p_pid;
		req->ktr_header.ktr_tid = td->td_tid;
		bcopy(td->td_name, req->ktr_header.ktr_comm,
		    sizeof(req->ktr_header.ktr_comm));
		req->ktr_buffer = NULL;
		req->ktr_header.ktr_len = 0;
	} else {
		p->p_traceflag |= KTRFAC_DROP;
		pm = print_message;
		print_message = 0;
		mtx_unlock(&ktrace_mtx);
		if (pm)
			printf("Out of ktrace request objects.\n");
	}
	return (req);
}

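/*
 * As above, but enter the ktrace suppression region first; the region
 * is left again on failure, so a non-NULL return implies the thread
 * still has TDP_INKTRACE set.
 */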
static struct ktr_request *
ktr_getrequest(int type)
{
	struct thread *td = curthread;
	struct ktr_request *req;

	ktrace_enter(td);
	req = ktr_getrequest_entered(td, type);
	if (req == NULL)
		ktrace_exit(td);

	return (req);
}

/*
 * Some trace generation environments don't permit direct access to VFS,
 * such as during a context switch where sleeping is not allowed.  Under these
 * circumstances, queue a request to the thread to be written asynchronously
 * later.
 */
static void
ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
	thread_lock(td);
	td->td_flags |= TDF_ASTPENDING;
	thread_unlock(td);
}

/*
 * Drain any pending ktrace records from the per-thread queue to disk.  This
 * is used both internally before committing other records, and also on
 * system call return.  We drain all the ones we can find at the time when
 * drain is requested, but don't keep draining after that as those events
 * may be approximately "after" the current event.
 */
static void
ktr_drain(struct thread *td)
{
	struct ktr_request *queued_req;
	STAILQ_HEAD(, ktr_request) local_queue;

	ktrace_assert(td);
	sx_assert(&ktrace_sx, SX_XLOCKED);

	STAILQ_INIT(&local_queue);

	if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
		mtx_unlock(&ktrace_mtx);

		while ((queued_req = STAILQ_FIRST(&local_queue))) {
			STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
			ktr_writerequest(td, queued_req);
			ktr_freerequest(queued_req);
		}
	}
}

/*
 * Submit a trace record for immediate commit to disk -- to be used only
 * where entering VFS is OK.  First drain any pending records that may have
 * been cached in the thread.
 */
static void
ktr_submitrequest(struct thread *td, struct ktr_request *req)
{

	ktrace_assert(td);

	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	ktr_writerequest(td, req);
	ktr_freerequest(req);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

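/*
 * Return a request, along with any attached payload buffer, to the
 * free pool.
 */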
static void
ktr_freerequest(struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	ktr_freerequest_locked(req);
	mtx_unlock(&ktrace_mtx);
}

static void
ktr_freerequest_locked(struct ktr_request *req)
{

	mtx_assert(&ktrace_mtx, MA_OWNED);
	if (req->ktr_buffer != NULL)
		free(req->ktr_buffer, M_KTRACE);
	STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
}

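/*
 * Trace file I/O state (vnode, credentials, and file size limit) is
 * shared via reference-counted ktr_io_params.  Each tracing process
 * holds one reference, and writers take a transient reference for the
 * duration of the write.  ktr_io_params_rele() returns the object once
 * the last reference is dropped, so the caller can free it (closing
 * the vnode) without holding ktrace_mtx.
 */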
static void
ktr_io_params_ref(struct ktr_io_params *kiop)
{
	mtx_assert(&ktrace_mtx, MA_OWNED);
	kiop->refs++;
}

static struct ktr_io_params *
ktr_io_params_rele(struct ktr_io_params *kiop)
{
	mtx_assert(&ktrace_mtx, MA_OWNED);
	if (kiop == NULL)
		return (NULL);
	KASSERT(kiop->refs > 0, ("kiop ref == 0 %p", kiop));
	return (--(kiop->refs) == 0 ? kiop : NULL);
}

void
ktr_io_params_free(struct ktr_io_params *kiop)
{
	if (kiop == NULL)
		return;

	MPASS(kiop->refs == 0);
	vn_close(kiop->vp, FWRITE, kiop->cr, curthread);
	crfree(kiop->cr);
	free(kiop, M_KTRACE);
}

static struct ktr_io_params *
ktr_io_params_alloc(struct thread *td, struct vnode *vp)
{
	struct ktr_io_params *res;

	res = malloc(sizeof(struct ktr_io_params), M_KTRACE, M_WAITOK);
	res->vp = vp;
	res->cr = crhold(td->td_ucred);
	res->lim = lim_cur(td, RLIMIT_FSIZE);
	res->refs = 1;
	return (res);
}

/*
 * Disable tracing for a process and release all associated resources.
 * The caller is responsible for releasing a reference on the returned
 * vnode and credentials.
 */
static struct ktr_io_params *
ktr_freeproc(struct proc *p)
{
	struct ktr_io_params *kiop;
	struct ktr_request *req;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&ktrace_mtx, MA_OWNED);
	kiop = ktr_io_params_rele(p->p_ktrioparms);
	p->p_ktrioparms = NULL;
	p->p_traceflag = 0;
	while ((req = STAILQ_FIRST(&p->p_ktr)) != NULL) {
		STAILQ_REMOVE_HEAD(&p->p_ktr, ktr_list);
		ktr_freerequest_locked(req);
	}
	return (kiop);
}

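/*
 * Return the trace vnode of a process, if any, optionally acquiring a
 * reference on it for the caller.
 */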
struct vnode *
ktr_get_tracevp(struct proc *p, bool ref)
{
	struct vnode *vp;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	if (p->p_ktrioparms != NULL) {
		vp = p->p_ktrioparms->vp;
		if (ref)
			vrefact(vp);
	} else {
		vp = NULL;
	}
	return (vp);
}

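/*
 * Record a system call entry event, attaching a copy of the argument
 * array as the payload.
 */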
void
ktrsyscall(int code, int narg, register_t args[])
{
	struct ktr_request *req;
	struct ktr_syscall *ktp;
	size_t buflen;
	char *buf = NULL;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	buflen = sizeof(register_t) * narg;
	if (buflen > 0) {
		buf = malloc(buflen, M_KTRACE, M_WAITOK);
		bcopy(args, buf, buflen);
	}
	req = ktr_getrequest(KTR_SYSCALL);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	ktp = &req->ktr_data.ktr_syscall;
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	if (buflen > 0) {
		req->ktr_header.ktr_len = buflen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

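/*
 * Record a system call return event, including the error and primary
 * return value.
 */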
void
ktrsysret(int code, int error, register_t retval)
{
	struct ktr_request *req;
	struct ktr_sysret *ktp;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_SYSRET);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_sysret;
	ktp->ktr_code = code;
	ktp->ktr_error = error;
	ktp->ktr_retval = ((error == 0) ? retval : 0);	/* what about val2 ? */
	ktr_submitrequest(curthread, req);
}

/*
 * When a setuid process execs, disable tracing.
 *
 * XXX: We toss any pending asynchronous records.
 */
struct ktr_io_params *
ktrprocexec(struct proc *p)
{
	struct ktr_io_params *kiop;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	kiop = p->p_ktrioparms;
	if (kiop == NULL || priv_check_cred(kiop->cr, PRIV_DEBUG_DIFFCRED))
		return (NULL);

	mtx_lock(&ktrace_mtx);
	kiop = ktr_freeproc(p);
	mtx_unlock(&ktrace_mtx);
	return (kiop);
}

/*
 * When a process exits, drain per-process asynchronous trace records
 * and disable tracing.
 */
void
ktrprocexit(struct thread *td)
{
	struct ktr_request *req;
	struct proc *p;
	struct ktr_io_params *kiop;

	p = td->td_proc;
	if (p->p_traceflag == 0)
		return;

	ktrace_enter(td);
	req = ktr_getrequest_entered(td, KTR_PROCDTOR);
	if (req != NULL)
		ktr_enqueuerequest(td, req);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	PROC_LOCK(p);
	mtx_lock(&ktrace_mtx);
	kiop = ktr_freeproc(p);
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	ktr_io_params_free(kiop);
	ktrace_exit(td);
}

static void
ktrprocctor_entered(struct thread *td, struct proc *p)
{
	struct ktr_proc_ctor *ktp;
	struct ktr_request *req;
	struct thread *td2;

	ktrace_assert(td);
	td2 = FIRST_THREAD_IN_PROC(p);
	req = ktr_getrequest_entered(td2, KTR_PROCCTOR);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_proc_ctor;
	ktp->sv_flags = p->p_sysent->sv_flags;
	ktr_enqueuerequest(td2, req);
}

void
ktrprocctor(struct proc *p)
{
	struct thread *td = curthread;

	if ((p->p_traceflag & KTRFAC_MASK) == 0)
		return;

	ktrace_enter(td);
	ktrprocctor_entered(td, p);
	ktrace_exit(td);
}

/*
 * When a process forks, enable tracing in the new process if needed.
 */
void
ktrprocfork(struct proc *p1, struct proc *p2)
{

	MPASS(p2->p_ktrioparms == NULL);
	MPASS(p2->p_traceflag == 0);

	if (p1->p_traceflag == 0)
		return;

	PROC_LOCK(p1);
	mtx_lock(&ktrace_mtx);
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_ktrioparms = p1->p_ktrioparms) != NULL)
			p1->p_ktrioparms->refs++;
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p1);

	ktrprocctor(p2);
}

/*
 * When a thread returns, drain any asynchronous records generated by the
 * system call.
 */
void
ktruserret(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

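/*
 * Record a namei() translation, attaching the looked-up path as the
 * payload.
 */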
void
ktrnamei(char *path)
{
	struct ktr_request *req;
	int namelen;
	char *buf = NULL;

	namelen = strlen(path);
	if (namelen > 0) {
		buf = malloc(namelen, M_KTRACE, M_WAITOK);
		bcopy(path, buf, namelen);
	}
	req = ktr_getrequest(KTR_NAMEI);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	if (namelen > 0) {
		req->ktr_header.ktr_len = namelen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

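/*
 * Record a sysctl access.  The numeric OID is converted back to its
 * string name via the kernel's internal 0.1 (name lookup) MIB, and the
 * name is logged as the payload.
 */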
void
ktrsysctl(int *name, u_int namelen)
{
	struct ktr_request *req;
	u_int mib[CTL_MAXNAME + 2];
	char *mibname;
	size_t mibnamelen;
	int error;

	/* Lookup name of mib. */
	KASSERT(namelen <= CTL_MAXNAME, ("sysctl MIB too long"));
	mib[0] = 0;
	mib[1] = 1;
	bcopy(name, mib + 2, namelen * sizeof(*name));
	mibnamelen = 128;
	mibname = malloc(mibnamelen, M_KTRACE, M_WAITOK);
	error = kernel_sysctl(curthread, mib, namelen + 2, mibname, &mibnamelen,
	    NULL, 0, &mibnamelen, 0);
	if (error) {
		free(mibname, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_SYSCTL);
	if (req == NULL) {
		free(mibname, M_KTRACE);
		return;
	}
	req->ktr_header.ktr_len = mibnamelen;
	req->ktr_buffer = mibname;
	ktr_submitrequest(curthread, req);
}

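/*
 * Record a general I/O event, logging up to ktr_geniosize bytes of the
 * transferred data.  The supplied uio is consumed and freed in all
 * cases.
 */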
void
ktrgenio(int fd, enum uio_rw rw, struct uio *uio, int error)
{
	struct ktr_request *req;
	struct ktr_genio *ktg;
	int datalen;
	char *buf;

	if (error) {
		free(uio, M_IOV);
		return;
	}
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;
	datalen = MIN(uio->uio_resid, ktr_geniosize);
	buf = malloc(datalen, M_KTRACE, M_WAITOK);
	error = uiomove(buf, datalen, uio);
	free(uio, M_IOV);
	if (error) {
		free(buf, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_GENIO);
	if (req == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ktg = &req->ktr_data.ktr_genio;
	ktg->ktr_fd = fd;
	ktg->ktr_rw = rw;
	req->ktr_header.ktr_len = datalen;
	req->ktr_buffer = buf;
	ktr_submitrequest(curthread, req);
}

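/*
 * Record signal delivery.  The record is queued on the thread rather
 * than written directly, as the caller may be in a context where
 * entering VFS is not safe.
 */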
void
ktrpsig(int sig, sig_t action, sigset_t *mask, int code)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_psig *kp;

	req = ktr_getrequest(KTR_PSIG);
	if (req == NULL)
		return;
	kp = &req->ktr_data.ktr_psig;
	kp->signo = (char)sig;
	kp->action = action;
	kp->mask = *mask;
	kp->code = code;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

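/*
 * Record a context switch event.  Sleeping is not permitted during a
 * switch, so the record is queued on the thread and flushed later via
 * ktr_drain().
 */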
void
ktrcsw(int out, int user, const char *wmesg)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_csw *kc;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_CSW);
	if (req == NULL)
		return;
	kc = &req->ktr_data.ktr_csw;
	kc->out = out;
	kc->user = user;
	if (wmesg != NULL)
		strlcpy(kc->wmesg, wmesg, sizeof(kc->wmesg));
	else
		bzero(kc->wmesg, sizeof(kc->wmesg));
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

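/*
 * Record a kernel structure: the payload is the NUL-terminated
 * structure name followed by the raw structure contents.
 */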
void
ktrstruct(const char *name, const void *data, size_t datalen)
{
	struct ktr_request *req;
	char *buf;
	size_t buflen, namelen;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	if (data == NULL)
		datalen = 0;
	namelen = strlen(name) + 1;
	buflen = namelen + datalen;
	buf = malloc(buflen, M_KTRACE, M_WAITOK);
	strcpy(buf, name);
	bcopy(data, buf + namelen, datalen);
	if ((req = ktr_getrequest(KTR_STRUCT)) == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	req->ktr_buffer = buf;
	req->ktr_header.ktr_len = buflen;
	ktr_submitrequest(curthread, req);
}

void
ktrstruct_error(const char *name, const void *data, size_t datalen, int error)
{

	if (error == 0)
		ktrstruct(name, data, datalen);
}

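/*
 * Record an array of kernel structures, trimming the logged data to at
 * most ktr_geniosize bytes (but always at least one item).  The source
 * may live in kernel or user space.
 */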
void
ktrstructarray(const char *name, enum uio_seg seg, const void *data,
    int num_items, size_t struct_size)
{
	struct ktr_request *req;
	struct ktr_struct_array *ksa;
	char *buf;
	size_t buflen, datalen, namelen;
	int max_items;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;
	if (num_items < 0)
		return;

	/* Trim array length to genio size. */
	max_items = ktr_geniosize / struct_size;
	if (num_items > max_items) {
		if (max_items == 0)
			num_items = 1;
		else
			num_items = max_items;
	}
	datalen = num_items * struct_size;

	if (data == NULL)
		datalen = 0;

	namelen = strlen(name) + 1;
	buflen = namelen + datalen;
	buf = malloc(buflen, M_KTRACE, M_WAITOK);
	strcpy(buf, name);
	if (seg == UIO_SYSSPACE)
		bcopy(data, buf + namelen, datalen);
	else {
		if (copyin(data, buf + namelen, datalen) != 0) {
			free(buf, M_KTRACE);
			return;
		}
	}
	if ((req = ktr_getrequest(KTR_STRUCT_ARRAY)) == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ksa = &req->ktr_data.ktr_struct_array;
	ksa->struct_size = struct_size;
	req->ktr_buffer = buf;
	req->ktr_header.ktr_len = buflen;
	ktr_submitrequest(curthread, req);
}

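/*
 * Record a Capsicum capability check failure, including the rights
 * that were needed and those actually held.
 */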
void
ktrcapfail(enum ktr_cap_fail_type type, const cap_rights_t *needed,
    const cap_rights_t *held)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_cap_fail *kcf;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_CAPFAIL);
	if (req == NULL)
		return;
	kcf = &req->ktr_data.ktr_cap_fail;
	kcf->cap_type = type;
	if (needed != NULL)
		kcf->cap_needed = *needed;
	else
		cap_rights_init(&kcf->cap_needed);
	if (held != NULL)
		kcf->cap_held = *held;
	else
		cap_rights_init(&kcf->cap_held);
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

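/*
 * Record the start of a VM fault and, in ktrfaultend() below, its
 * result.
 */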
void
ktrfault(vm_offset_t vaddr, int type)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_fault *kf;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_FAULT);
	if (req == NULL)
		return;
	kf = &req->ktr_data.ktr_fault;
	kf->vaddr = vaddr;
	kf->type = type;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}

void
ktrfaultend(int result)
{
	struct thread *td = curthread;
	struct ktr_request *req;
	struct ktr_faultend *kf;

	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
		return;

	req = ktr_getrequest(KTR_FAULTEND);
	if (req == NULL)
		return;
	kf = &req->ktr_data.ktr_faultend;
	kf->result = result;
	ktr_enqueuerequest(td, req);
	ktrace_exit(td);
}
#endif /* KTRACE */

/* Interface and common routines */

#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
#endif
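/*
 * The ktrace(2) system call, normally driven by ktrace(1).  An
 * illustrative userland sketch (not taken from ktrace(1); "pid" is a
 * hypothetical target process):
 *
 *	if (ktrace("ktrace.out", KTROP_SET | KTRFLAG_DESCEND,
 *	    KTRFAC_SYSCALL | KTRFAC_SYSRET | KTRFAC_NAMEI, pid) == -1)
 *		err(1, "ktrace");
 *	...
 *	ktrace("ktrace.out", KTROP_CLEARFILE, 0, 0);
 */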
/* ARGSUSED */
int
sys_ktrace(struct thread *td, struct ktrace_args *uap)
{
#ifdef KTRACE
	struct vnode *vp = NULL;
	struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int nfound, ret = 0;
	int flags, error = 0;
	struct nameidata nd;
	struct ktr_io_params *kiop, *old_kiop;

	/*
	 * Need something to (un)trace.
	 */
	if (ops != KTROP_CLEARFILE && facs == 0)
		return (EINVAL);

	kiop = NULL;
	ktrace_enter(td);
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, uap->fname, td);
		flags = FREAD | FWRITE | O_NOFOLLOW;
		error = vn_open(&nd, &flags, 0, NULL);
		if (error) {
			ktrace_exit(td);
			return (error);
		}
		NDFREE(&nd, NDF_ONLY_PNBUF);
		vp = nd.ni_vp;
		VOP_UNLOCK(vp);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
			ktrace_exit(td);
			return (EACCES);
		}
		kiop = ktr_io_params_alloc(td, vp);
	}
	/*
	 * Clear all uses of the tracefile.
	 */
	if (ops == KTROP_CLEARFILE) {
restart:
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			old_kiop = NULL;
			PROC_LOCK(p);
			if (p->p_ktrioparms != NULL &&
			    p->p_ktrioparms->vp == vp) {
				if (ktrcanset(td, p)) {
					mtx_lock(&ktrace_mtx);
					old_kiop = ktr_freeproc(p);
					mtx_unlock(&ktrace_mtx);
				} else
					error = EPERM;
			}
			PROC_UNLOCK(p);
			if (old_kiop != NULL) {
				sx_sunlock(&allproc_lock);
				ktr_io_params_free(old_kiop);
				goto restart;
			}
		}
		sx_sunlock(&allproc_lock);
		goto done;
	}
	/*
	 * do it
	 */
	sx_slock(&proctree_lock);
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		/*
		 * ktrops() may call vrele().  Lock pg_members
		 * by the proctree_lock rather than pg_mtx.
		 */
		PGRP_UNLOCK(pg);
		nfound = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NEW ||
			    p_cansee(td, p) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			nfound++;
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, kiop);
			else
				ret |= ktrops(td, p, ops, facs, kiop);
		}
		if (nfound == 0) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL)
			error = ESRCH;
		else
			error = p_cansee(td, p);
		if (error) {
			if (p != NULL)
				PROC_UNLOCK(p);
			sx_sunlock(&proctree_lock);
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, kiop);
		else
			ret |= ktrops(td, p, ops, facs, kiop);
	}
	sx_sunlock(&proctree_lock);
	if (!ret)
		error = EPERM;
done:
	if (kiop != NULL) {
		mtx_lock(&ktrace_mtx);
		kiop = ktr_io_params_rele(kiop);
		mtx_unlock(&ktrace_mtx);
		ktr_io_params_free(kiop);
	}
	ktrace_exit(td);
	return (error);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

/* ARGSUSED */
int
sys_utrace(struct thread *td, struct utrace_args *uap)
{

#ifdef KTRACE
	struct ktr_request *req;
	void *cp;
	int error;

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	cp = malloc(uap->len, M_KTRACE, M_WAITOK);
	error = copyin(uap->addr, cp, uap->len);
	if (error) {
		free(cp, M_KTRACE);
		return (error);
	}
	req = ktr_getrequest(KTR_USER);
	if (req == NULL) {
		free(cp, M_KTRACE);
		return (ENOMEM);
	}
	req->ktr_buffer = cp;
	req->ktr_header.ktr_len = uap->len;
	ktr_submitrequest(td, req);
	return (0);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

#ifdef KTRACE
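/*
 * Apply a set or clear operation to a single process.  Returns 0 only
 * if the caller may not change the target's trace state; a process
 * that is exiting is skipped but still counts as success.  The process
 * lock is held on entry and dropped before returning.
 */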
static int
ktrops(struct thread *td, struct proc *p, int ops, int facs,
    struct ktr_io_params *new_kiop)
{
	struct ktr_io_params *old_kiop;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!ktrcanset(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	if (p->p_flag & P_WEXIT) {
		/* If the process is exiting, just ignore it. */
		PROC_UNLOCK(p);
		return (1);
	}
	old_kiop = NULL;
	mtx_lock(&ktrace_mtx);
	if (ops == KTROP_SET) {
		if (p->p_ktrioparms != NULL &&
		    p->p_ktrioparms->vp != new_kiop->vp) {
			/* if trace file already in use, relinquish below */
			old_kiop = ktr_io_params_rele(p->p_ktrioparms);
			p->p_ktrioparms = NULL;
		}
		if (p->p_ktrioparms == NULL) {
			p->p_ktrioparms = new_kiop;
			ktr_io_params_ref(new_kiop);
		}
		p->p_traceflag |= facs;
		if (priv_check(td, PRIV_KTRACE) == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0)
			/* no more tracing */
			old_kiop = ktr_freeproc(p);
	}
	mtx_unlock(&ktrace_mtx);
	if ((p->p_traceflag & KTRFAC_MASK) != 0)
		ktrprocctor_entered(td, p);
	PROC_UNLOCK(p);
	ktr_io_params_free(old_kiop);

	return (1);
}

static int
ktrsetchildren(struct thread *td, struct proc *top, int ops, int facs,
    struct ktr_io_params *new_kiop)
{
	struct proc *p;
	int ret = 0;

	p = top;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, new_kiop);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
		PROC_LOCK(p);
	}
	/*NOTREACHED*/
}

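/*
 * Write a single request to the trace vnode as up to three iovecs: the
 * header, the fixed-size event data, and any variable-length payload.
 * On a write error, tracing is disabled for this process and a notice
 * is logged; other processes may continue tracing to the same vnode.
 */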
static void
ktr_writerequest(struct thread *td, struct ktr_request *req)
{
	struct ktr_io_params *kiop, *kiop1;
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	off_t lim;
	int datalen, buflen;
	int error;

	p = td->td_proc;

	/*
	 * We reference the kiop for use in I/O in case ktrace is
	 * disabled on the process as we write out the request.
	 */
	mtx_lock(&ktrace_mtx);
	kiop = p->p_ktrioparms;

	/*
	 * If kiop is NULL, it has been cleared out from under this
	 * request, so just drop it.
	 */
	if (kiop == NULL) {
		mtx_unlock(&ktrace_mtx);
		return;
	}

	ktr_io_params_ref(kiop);
	vp = kiop->vp;
	cred = kiop->cr;
	lim = kiop->lim;

	KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));
	mtx_unlock(&ktrace_mtx);

	kth = &req->ktr_header;
	KASSERT(((u_short)kth->ktr_type & ~KTR_DROP) < nitems(data_lengths),
	    ("data_lengths array overflow"));
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
	buflen = kth->ktr_len;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}

	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	td->td_ktr_io_lim = lim;
#ifdef MAC
	error = mac_vnode_check_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp);
	vn_finished_write(mp);
	if (error == 0) {
		mtx_lock(&ktrace_mtx);
		kiop = ktr_io_params_rele(kiop);
		mtx_unlock(&ktrace_mtx);
		ktr_io_params_free(kiop);
		return;
	}

	/*
	 * If error encountered, give up tracing on this vnode on this
	 * process.  Other processes might still be suitable for
	 * writes to this vnode.
	 */
	log(LOG_NOTICE,
	    "ktrace write failed, errno %d, tracing stopped for pid %d\n",
	    error, p->p_pid);

	kiop1 = NULL;
	PROC_LOCK(p);
	mtx_lock(&ktrace_mtx);
	if (p->p_ktrioparms != NULL && p->p_ktrioparms->vp == vp)
		kiop1 = ktr_freeproc(p);
	kiop = ktr_io_params_rele(kiop);
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	ktr_io_params_free(kiop1);
	ktr_io_params_free(kiop);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(struct thread *td, struct proc *targetp)
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if (targetp->p_traceflag & KTRFAC_ROOT &&
	    priv_check(td, PRIV_KTRACE))
		return (0);

	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}

#endif /* KTRACE */