1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 *
21 * Portions Copyright 2010 The FreeBSD Foundation
22 *
23 * $FreeBSD$
24 */
25
26 /*
27 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
28 * Use is subject to license terms.
29 */
30
31 /*
32 * Copyright (c) 2015, Joyent, Inc. All rights reserved.
33 */
34
35 #include <sys/atomic.h>
36 #include <sys/errno.h>
37 #include <sys/stat.h>
38 #include <sys/endian.h>
39 #include <sys/modctl.h>
40 #include <sys/conf.h>
41 #include <sys/systm.h>
42 #ifdef illumos
43 #include <sys/ddi.h>
44 #endif
45 #include <sys/sunddi.h>
46 #include <sys/cpuvar.h>
47 #include <sys/kmem.h>
48 #ifdef illumos
49 #include <sys/strsubr.h>
50 #endif
51 #include <sys/fasttrap.h>
52 #include <sys/fasttrap_impl.h>
53 #include <sys/fasttrap_isa.h>
54 #include <sys/dtrace.h>
55 #include <sys/dtrace_impl.h>
56 #include <sys/sysmacros.h>
57 #include <sys/proc.h>
58 #undef AT_UID
59 #undef AT_GID
60 #include <sys/policy.h>
61 #ifdef illumos
62 #include <util/qsort.h>
63 #endif
64 #include <sys/mutex.h>
65 #include <sys/kernel.h>
66 #ifndef illumos
67 #include <sys/dtrace_bsd.h>
68 #include <sys/eventhandler.h>
69 #include <sys/rmlock.h>
70 #include <sys/sysent.h>
71 #include <sys/sysctl.h>
72 #include <sys/u8_textprep.h>
73 #include <sys/user.h>
74
75 #include <vm/vm.h>
76 #include <vm/pmap.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_param.h>
79
80 #include <cddl/dev/dtrace/dtrace_cddl.h>
81 #endif
82
83 /*
84 * User-Land Trap-Based Tracing
85 * ----------------------------
86 *
87 * The fasttrap provider allows DTrace consumers to instrument any user-level
88 * instruction to gather data; this includes probes with semantic
89 * signifigance like entry and return as well as simple offsets into the
90 * function. While the specific techniques used are very ISA specific, the
91 * methodology is generalizable to any architecture.
92 *
93 *
94 * The General Methodology
95 * -----------------------
96 *
97 * With the primary goal of tracing every user-land instruction and the
98 * limitation that we can't trust user space so don't want to rely on much
99 * information there, we begin by replacing the instructions we want to trace
100 * with trap instructions. Each instruction we overwrite is saved into a hash
101 * table keyed by process ID and pc address. When we enter the kernel due to
102 * this trap instruction, we need the effects of the replaced instruction to
103 * appear to have occurred before we proceed with the user thread's
104 * execution.
105 *
106 * Each user level thread is represented by a ulwp_t structure which is
107 * always easily accessible through a register. The most basic way to produce
108 * the effects of the instruction we replaced is to copy that instruction out
109 * to a bit of scratch space reserved in the user thread's ulwp_t structure
110 * (a sort of kernel-private thread local storage), set the PC to that
111 * scratch space and single step. When we reenter the kernel after single
112 * stepping the instruction we must then adjust the PC to point to what would
113 * normally be the next instruction. Of course, special care must be taken
114 * for branches and jumps, but these represent such a small fraction of any
115 * instruction set that writing the code to emulate these in the kernel is
116 * not too difficult.
117 *
118 * Return probes may require several tracepoints to trace every return site,
119 * and, conversely, each tracepoint may activate several probes (the entry
120 * and offset 0 probes, for example). To solve this muliplexing problem,
121 * tracepoints contain lists of probes to activate and probes contain lists
122 * of tracepoints to enable. If a probe is activated, it adds its ID to
123 * existing tracepoints or creates new ones as necessary.
124 *
125 * Most probes are activated _before_ the instruction is executed, but return
126 * probes are activated _after_ the effects of the last instruction of the
127 * function are visible. Return probes must be fired _after_ we have
128 * single-stepped the instruction whereas all other probes are fired
129 * beforehand.
130 *
131 *
132 * Lock Ordering
133 * -------------
134 *
135 * The lock ordering below -- both internally and with respect to the DTrace
136 * framework -- is a little tricky and bears some explanation. Each provider
137 * has a lock (ftp_mtx) that protects its members including reference counts
138 * for enabled probes (ftp_rcount), consumers actively creating probes
139 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
140 * from being freed. A provider is looked up by taking the bucket lock for the
141 * provider hash table, and is returned with its lock held. The provider lock
142 * may be taken in functions invoked by the DTrace framework, but may not be
143 * held while calling functions in the DTrace framework.
144 *
145 * To ensure consistency over multiple calls to the DTrace framework, the
146 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
147 * not be taken when holding the provider lock as that would create a cyclic
148 * lock ordering. In situations where one would naturally take the provider
149 * lock and then the creation lock, we instead up a reference count to prevent
150 * the provider from disappearing, drop the provider lock, and acquire the
151 * creation lock.
152 *
153 * Briefly:
154 * bucket lock before provider lock
155 * DTrace before provider lock
156 * creation lock before DTrace
157 * never hold the provider lock and creation lock simultaneously
158 */
159
160 static d_open_t fasttrap_open;
161 static d_ioctl_t fasttrap_ioctl;
162
163 static struct cdevsw fasttrap_cdevsw = {
164 .d_version = D_VERSION,
165 .d_open = fasttrap_open,
166 .d_ioctl = fasttrap_ioctl,
167 .d_name = "fasttrap",
168 };
169 static struct cdev *fasttrap_cdev;
170 static dtrace_meta_provider_id_t fasttrap_meta_id;
171
172 static struct proc *fasttrap_cleanup_proc;
173 static struct mtx fasttrap_cleanup_mtx;
174 static uint_t fasttrap_cleanup_work, fasttrap_cleanup_drain, fasttrap_cleanup_cv;
175
176 /*
177 * Generation count on modifications to the global tracepoint lookup table.
178 */
179 static volatile uint64_t fasttrap_mod_gen;
180
181 /*
182 * When the fasttrap provider is loaded, fasttrap_max is set to either
183 * FASTTRAP_MAX_DEFAULT, or the value for fasttrap-max-probes in the
184 * fasttrap.conf file (Illumos), or the value provied in the loader.conf (FreeBSD).
185 * Each time a probe is created, fasttrap_total is incremented by the number
186 * of tracepoints that may be associated with that probe; fasttrap_total is capped
187 * at fasttrap_max.
188 */
189 #define FASTTRAP_MAX_DEFAULT 250000
190 static uint32_t fasttrap_max = FASTTRAP_MAX_DEFAULT;
191 static uint32_t fasttrap_total;
192
193 /*
194 * Copyright (c) 2011, Joyent, Inc. All rights reserved.
195 */
196
197 #define FASTTRAP_TPOINTS_DEFAULT_SIZE 0x4000
198 #define FASTTRAP_PROVIDERS_DEFAULT_SIZE 0x100
199 #define FASTTRAP_PROCS_DEFAULT_SIZE 0x100
200
201 #define FASTTRAP_PID_NAME "pid"
202
203 fasttrap_hash_t fasttrap_tpoints;
204 static fasttrap_hash_t fasttrap_provs;
205 static fasttrap_hash_t fasttrap_procs;
206
207 static uint64_t fasttrap_pid_count; /* pid ref count */
208 static kmutex_t fasttrap_count_mtx; /* lock on ref count */
209
210 #define FASTTRAP_ENABLE_FAIL 1
211 #define FASTTRAP_ENABLE_PARTIAL 2
212
213 static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
214 static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);
215
216 static fasttrap_provider_t *fasttrap_provider_lookup(pid_t, const char *,
217 const dtrace_pattr_t *);
218 static void fasttrap_provider_retire(pid_t, const char *, int);
219 static void fasttrap_provider_free(fasttrap_provider_t *);
220
221 static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
222 static void fasttrap_proc_release(fasttrap_proc_t *);
223
224 #ifndef illumos
225 static void fasttrap_thread_dtor(void *, struct thread *);
226 #endif
227
228 #define FASTTRAP_PROVS_INDEX(pid, name) \
229 ((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)
230
231 #define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)
232
233 #ifndef illumos
234 struct rmlock fasttrap_tp_lock;
235 static eventhandler_tag fasttrap_thread_dtor_tag;
236 #endif
237
238 static unsigned long tpoints_hash_size = FASTTRAP_TPOINTS_DEFAULT_SIZE;
239
240 #ifdef __FreeBSD__
241 SYSCTL_DECL(_kern_dtrace);
242 SYSCTL_NODE(_kern_dtrace, OID_AUTO, fasttrap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
243 "DTrace fasttrap parameters");
244 SYSCTL_UINT(_kern_dtrace_fasttrap, OID_AUTO, max_probes, CTLFLAG_RWTUN, &fasttrap_max,
245 FASTTRAP_MAX_DEFAULT, "Maximum number of fasttrap probes");
246 SYSCTL_ULONG(_kern_dtrace_fasttrap, OID_AUTO, tpoints_hash_size, CTLFLAG_RDTUN, &tpoints_hash_size,
247 FASTTRAP_TPOINTS_DEFAULT_SIZE, "Size of the tracepoint hash table");
248 #endif
249
250 static int
251 fasttrap_highbit(ulong_t i)
252 {
253 int h = 1;
254
255 if (i == 0)
256 return (0);
257 #ifdef _LP64
258 if (i & 0xffffffff00000000ul) {
259 h += 32; i >>= 32;
260 }
261 #endif
262 if (i & 0xffff0000) {
263 h += 16; i >>= 16;
264 }
265 if (i & 0xff00) {
266 h += 8; i >>= 8;
267 }
268 if (i & 0xf0) {
269 h += 4; i >>= 4;
270 }
271 if (i & 0xc) {
272 h += 2; i >>= 2;
273 }
274 if (i & 0x2) {
275 h += 1;
276 }
277 return (h);
278 }
279
280 static uint_t
281 fasttrap_hash_str(const char *p)
282 {
283 unsigned int g;
284 uint_t hval = 0;
285
286 while (*p) {
287 hval = (hval << 4) + *p++;
288 if ((g = (hval & 0xf0000000)) != 0)
289 hval ^= g >> 24;
290 hval &= ~g;
291 }
292 return (hval);
293 }
294
295 void
296 fasttrap_sigtrap(proc_t *p, kthread_t *t, uintptr_t pc)
297 {
298 ksiginfo_t ksi;
299
300 ksiginfo_init(&ksi);
301 ksi.ksi_signo = SIGTRAP;
302 ksi.ksi_code = TRAP_DTRACE;
303 ksi.ksi_addr = (caddr_t)pc;
304 PROC_LOCK(p);
305 (void)tdsendsignal(p, t, SIGTRAP, &ksi);
306 PROC_UNLOCK(p);
307 }
308
309 #ifndef illumos
310 /*
311 * Obtain a chunk of scratch space in the address space of the target process.
312 */
313 fasttrap_scrspace_t *
314 fasttrap_scraddr(struct thread *td, fasttrap_proc_t *fprc)
315 {
316 fasttrap_scrblock_t *scrblk;
317 fasttrap_scrspace_t *scrspc;
318 struct proc *p;
319 vm_offset_t addr;
320 int error, i;
321
322 scrspc = NULL;
323 if (td->t_dtrace_sscr != NULL) {
324 /* If the thread already has scratch space, we're done. */
325 scrspc = (fasttrap_scrspace_t *)td->t_dtrace_sscr;
326 return (scrspc);
327 }
328
329 p = td->td_proc;
330
331 mutex_enter(&fprc->ftpc_mtx);
332 if (LIST_EMPTY(&fprc->ftpc_fscr)) {
333 /*
334 * No scratch space is available, so we'll map a new scratch
335 * space block into the traced process' address space.
336 */
337 addr = 0;
338 error = vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &addr,
339 FASTTRAP_SCRBLOCK_SIZE, 0, VMFS_ANY_SPACE,
340 VM_PROT_READ | VM_PROT_EXECUTE,
341 VM_PROT_READ | VM_PROT_EXECUTE, MAP_COPY_ON_WRITE);
342 if (error != KERN_SUCCESS)
343 goto done;
344
345 scrblk = malloc(sizeof(*scrblk), M_SOLARIS, M_WAITOK);
346 scrblk->ftsb_addr = addr;
347 LIST_INSERT_HEAD(&fprc->ftpc_scrblks, scrblk, ftsb_next);
348
349 /*
350 * Carve the block up into chunks and put them on the free list.
351 */
352 for (i = 0;
353 i < FASTTRAP_SCRBLOCK_SIZE / FASTTRAP_SCRSPACE_SIZE; i++) {
354 scrspc = malloc(sizeof(*scrspc), M_SOLARIS, M_WAITOK);
355 scrspc->ftss_addr = addr +
356 i * FASTTRAP_SCRSPACE_SIZE;
357 LIST_INSERT_HEAD(&fprc->ftpc_fscr, scrspc,
358 ftss_next);
359 }
360 }
361
362 /*
363 * Take the first scratch chunk off the free list, put it on the
364 * allocated list, and return its address.
365 */
366 scrspc = LIST_FIRST(&fprc->ftpc_fscr);
367 LIST_REMOVE(scrspc, ftss_next);
368 LIST_INSERT_HEAD(&fprc->ftpc_ascr, scrspc, ftss_next);
369
370 /*
371 * This scratch space is reserved for use by td until the thread exits.
372 */
373 td->t_dtrace_sscr = scrspc;
374
375 done:
376 mutex_exit(&fprc->ftpc_mtx);
377
378 return (scrspc);
379 }
380
381 /*
382 * Return any allocated per-thread scratch space chunks back to the process'
383 * free list.
384 */
385 static void
386 fasttrap_thread_dtor(void *arg __unused, struct thread *td)
387 {
388 fasttrap_bucket_t *bucket;
389 fasttrap_proc_t *fprc;
390 fasttrap_scrspace_t *scrspc;
391 pid_t pid;
392
393 if (td->t_dtrace_sscr == NULL)
394 return;
395
396 pid = td->td_proc->p_pid;
397 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
398 fprc = NULL;
399
400 /* Look up the fasttrap process handle for this process. */
401 mutex_enter(&bucket->ftb_mtx);
402 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
403 if (fprc->ftpc_pid == pid) {
404 mutex_enter(&fprc->ftpc_mtx);
405 mutex_exit(&bucket->ftb_mtx);
406 break;
407 }
408 }
409 if (fprc == NULL) {
410 mutex_exit(&bucket->ftb_mtx);
411 return;
412 }
413
414 scrspc = (fasttrap_scrspace_t *)td->t_dtrace_sscr;
415 LIST_REMOVE(scrspc, ftss_next);
416 LIST_INSERT_HEAD(&fprc->ftpc_fscr, scrspc, ftss_next);
417
418 mutex_exit(&fprc->ftpc_mtx);
419 }
420 #endif
421
422 /*
423 * This function ensures that no threads are actively using the memory
424 * associated with probes that were formerly live.
425 */
426 static void
427 fasttrap_mod_barrier(uint64_t gen)
428 {
429 int i;
430
431 if (gen < fasttrap_mod_gen)
432 return;
433
434 fasttrap_mod_gen++;
435
436 #ifdef illumos
437 CPU_FOREACH(i) {
438 mutex_enter(&fasttrap_cpuc_pid_lock[i]);
439 mutex_exit(&fasttrap_cpuc_pid_lock[i]);
440 }
441 #else
442 rm_wlock(&fasttrap_tp_lock);
443 rm_wunlock(&fasttrap_tp_lock);
444 #endif
445 }
446
447 /*
448 * This function performs asynchronous cleanup of fasttrap providers. The
449 * Solaris implementation of this mechanism use a timeout that's activated in
450 * fasttrap_pid_cleanup(), but this doesn't work in FreeBSD: one may sleep while
451 * holding the DTrace mutexes, but it is unsafe to sleep in a callout handler.
452 * Thus we use a dedicated process to perform the cleanup when requested.
453 */
454 /*ARGSUSED*/
455 static void
456 fasttrap_pid_cleanup_cb(void *data)
457 {
458 fasttrap_provider_t **fpp, *fp;
459 fasttrap_bucket_t *bucket;
460 dtrace_provider_id_t provid;
461 int i, later = 0, rval;
462
463 mtx_lock(&fasttrap_cleanup_mtx);
464 while (!fasttrap_cleanup_drain || later > 0) {
465 fasttrap_cleanup_work = 0;
466 mtx_unlock(&fasttrap_cleanup_mtx);
467
468 later = 0;
469
470 /*
471 * Iterate over all the providers trying to remove the marked
472 * ones. If a provider is marked but not retired, we just
473 * have to take a crack at removing it -- it's no big deal if
474 * we can't.
475 */
476 for (i = 0; i < fasttrap_provs.fth_nent; i++) {
477 bucket = &fasttrap_provs.fth_table[i];
478 mutex_enter(&bucket->ftb_mtx);
479 fpp = (fasttrap_provider_t **)&bucket->ftb_data;
480
481 while ((fp = *fpp) != NULL) {
482 if (!fp->ftp_marked) {
483 fpp = &fp->ftp_next;
484 continue;
485 }
486
487 mutex_enter(&fp->ftp_mtx);
488
489 /*
490 * If this provider has consumers actively
491 * creating probes (ftp_ccount) or is a USDT
492 * provider (ftp_mcount), we can't unregister
493 * or even condense.
494 */
495 if (fp->ftp_ccount != 0 ||
496 fp->ftp_mcount != 0) {
497 mutex_exit(&fp->ftp_mtx);
498 fp->ftp_marked = 0;
499 continue;
500 }
501
502 if (!fp->ftp_retired || fp->ftp_rcount != 0)
503 fp->ftp_marked = 0;
504
505 mutex_exit(&fp->ftp_mtx);
506
507 /*
508 * If we successfully unregister this
509 * provider we can remove it from the hash
510 * chain and free the memory. If our attempt
511 * to unregister fails and this is a retired
512 * provider, increment our flag to try again
513 * pretty soon. If we've consumed more than
514 * half of our total permitted number of
515 * probes call dtrace_condense() to try to
516 * clean out the unenabled probes.
517 */
518 provid = fp->ftp_provid;
519 if ((rval = dtrace_unregister(provid)) != 0) {
520 if (fasttrap_total > fasttrap_max / 2)
521 (void) dtrace_condense(provid);
522
523 if (rval == EAGAIN)
524 fp->ftp_marked = 1;
525
526 later += fp->ftp_marked;
527 fpp = &fp->ftp_next;
528 } else {
529 *fpp = fp->ftp_next;
530 fasttrap_provider_free(fp);
531 }
532 }
533 mutex_exit(&bucket->ftb_mtx);
534 }
535 mtx_lock(&fasttrap_cleanup_mtx);
536
537 /*
538 * If we were unable to retire a provider, try again after a
539 * second. This situation can occur in certain circumstances
540 * where providers cannot be unregistered even though they have
541 * no probes enabled because of an execution of dtrace -l or
542 * something similar.
543 */
544 if (later > 0 || fasttrap_cleanup_work ||
545 fasttrap_cleanup_drain) {
546 mtx_unlock(&fasttrap_cleanup_mtx);
547 pause("ftclean", hz);
548 mtx_lock(&fasttrap_cleanup_mtx);
549 } else
550 mtx_sleep(&fasttrap_cleanup_cv, &fasttrap_cleanup_mtx,
551 0, "ftcl", 0);
552 }
553
554 /*
555 * Wake up the thread in fasttrap_unload() now that we're done.
556 */
557 wakeup(&fasttrap_cleanup_drain);
558 mtx_unlock(&fasttrap_cleanup_mtx);
559
560 kthread_exit();
561 }
562
563 /*
564 * Activates the asynchronous cleanup mechanism.
565 */
566 static void
567 fasttrap_pid_cleanup(void)
568 {
569
570 mtx_lock(&fasttrap_cleanup_mtx);
571 if (!fasttrap_cleanup_work) {
572 fasttrap_cleanup_work = 1;
573 wakeup(&fasttrap_cleanup_cv);
574 }
575 mtx_unlock(&fasttrap_cleanup_mtx);
576 }
577
578 /*
579 * This is called from cfork() via dtrace_fasttrap_fork(). The child
580 * process's address space is (roughly) a copy of the parent process's so
581 * we have to remove all the instrumentation we had previously enabled in the
582 * parent.
583 */
584 static void
585 fasttrap_fork(proc_t *p, proc_t *cp)
586 {
587 #ifndef illumos
588 fasttrap_scrblock_t *scrblk;
589 fasttrap_proc_t *fprc = NULL;
590 #endif
591 pid_t ppid = p->p_pid;
592 int error, i;
593
594 ASSERT(curproc == p);
595 #ifdef illumos
596 ASSERT(p->p_proc_flag & P_PR_LOCK);
597 #else
598 PROC_LOCK_ASSERT(p, MA_OWNED);
599 #endif
600 #ifdef illumos
601 ASSERT(p->p_dtrace_count > 0);
602 #else
603 /*
604 * This check is purposely here instead of in kern_fork.c because,
605 * for legal resons, we cannot include the dtrace_cddl.h header
606 * inside kern_fork.c and insert if-clause there.
607 */
608 if (p->p_dtrace_count == 0 && p->p_dtrace_helpers == NULL)
609 return;
610 #endif
611
612 ASSERT(cp->p_dtrace_count == 0);
613
614 /*
615 * This would be simpler and faster if we maintained per-process
616 * hash tables of enabled tracepoints. It could, however, potentially
617 * slow down execution of a tracepoint since we'd need to go
618 * through two levels of indirection. In the future, we should
619 * consider either maintaining per-process ancillary lists of
620 * enabled tracepoints or hanging a pointer to a per-process hash
621 * table of enabled tracepoints off the proc structure.
622 */
623
624 /*
625 * We don't have to worry about the child process disappearing
626 * because we're in fork().
627 */
628 #ifdef illumos
629 mtx_lock_spin(&cp->p_slock);
630 sprlock_proc(cp);
631 mtx_unlock_spin(&cp->p_slock);
632 #else
633 /*
634 * fasttrap_tracepoint_remove() expects the child process to be
635 * unlocked and the VM then expects curproc to be unlocked.
636 */
637 _PHOLD(cp);
638 PROC_UNLOCK(cp);
639 PROC_UNLOCK(p);
640 if (p->p_dtrace_count == 0)
641 goto dup_helpers;
642 #endif
643
644 /*
645 * Iterate over every tracepoint looking for ones that belong to the
646 * parent process, and remove each from the child process.
647 */
648 for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
649 fasttrap_tracepoint_t *tp;
650 fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];
651
652 mutex_enter(&bucket->ftb_mtx);
653 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
654 if (tp->ftt_pid == ppid &&
655 tp->ftt_proc->ftpc_acount != 0) {
656 int ret = fasttrap_tracepoint_remove(cp, tp);
657 ASSERT(ret == 0);
658
659 /*
660 * The count of active providers can only be
661 * decremented (i.e. to zero) during exec,
662 * exit, and removal of a meta provider so it
663 * should be impossible to drop the count
664 * mid-fork.
665 */
666 ASSERT(tp->ftt_proc->ftpc_acount != 0);
667 #ifndef illumos
668 fprc = tp->ftt_proc;
669 #endif
670 }
671 }
672 mutex_exit(&bucket->ftb_mtx);
673
674 #ifndef illumos
675 /*
676 * Unmap any scratch space inherited from the parent's address
677 * space.
678 */
679 if (fprc != NULL) {
680 mutex_enter(&fprc->ftpc_mtx);
681 LIST_FOREACH(scrblk, &fprc->ftpc_scrblks, ftsb_next) {
682 error = vm_map_remove(&cp->p_vmspace->vm_map,
683 scrblk->ftsb_addr,
684 scrblk->ftsb_addr + FASTTRAP_SCRBLOCK_SIZE);
685 ASSERT(error == KERN_SUCCESS);
686 }
687 mutex_exit(&fprc->ftpc_mtx);
688 }
689 #endif
690 }
691
692 #ifdef illumos
693 mutex_enter(&cp->p_lock);
694 sprunlock(cp);
695 #else
696 dup_helpers:
697 if (p->p_dtrace_helpers != NULL)
698 dtrace_helpers_duplicate(p, cp);
699 PROC_LOCK(p);
700 PROC_LOCK(cp);
701 _PRELE(cp);
702 #endif
703 }
704
705 /*
706 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
707 * is set on the proc structure to indicate that there is a pid provider
708 * associated with this process.
709 */
710 static void
711 fasttrap_exec_exit(proc_t *p)
712 {
713 #ifndef illumos
714 struct thread *td;
715 #endif
716
717 #ifdef illumos
718 ASSERT(p == curproc);
719 #else
720 PROC_LOCK_ASSERT(p, MA_OWNED);
721 _PHOLD(p);
722 /*
723 * Since struct threads may be recycled, we cannot rely on t_dtrace_sscr
724 * fields to be zeroed by kdtrace_thread_ctor. Thus we must zero it
725 * ourselves when a process exits.
726 */
727 FOREACH_THREAD_IN_PROC(p, td)
728 td->t_dtrace_sscr = NULL;
729 PROC_UNLOCK(p);
730 #endif
731
732 /*
733 * We clean up the pid provider for this process here; user-land
734 * static probes are handled by the meta-provider remove entry point.
735 */
736 fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0);
737 #ifndef illumos
738 if (p->p_dtrace_helpers)
739 dtrace_helpers_destroy(p);
740 PROC_LOCK(p);
741 _PRELE(p);
742 #endif
743 }
744
745
746 /*ARGSUSED*/
747 static void
748 fasttrap_pid_provide(void *arg, dtrace_probedesc_t *desc)
749 {
750 /*
751 * There are no "default" pid probes.
752 */
753 }
754
755 static int
756 fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
757 {
758 fasttrap_tracepoint_t *tp, *new_tp = NULL;
759 fasttrap_bucket_t *bucket;
760 fasttrap_id_t *id;
761 pid_t pid;
762 uintptr_t pc;
763
764 ASSERT(index < probe->ftp_ntps);
765
766 pid = probe->ftp_pid;
767 pc = probe->ftp_tps[index].fit_tp->ftt_pc;
768 id = &probe->ftp_tps[index].fit_id;
769
770 ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
771
772 #ifdef illumos
773 ASSERT(!(p->p_flag & SVFORK));
774 #endif
775
776 /*
777 * Before we make any modifications, make sure we've imposed a barrier
778 * on the generation in which this probe was last modified.
779 */
780 fasttrap_mod_barrier(probe->ftp_gen);
781
782 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
783
784 /*
785 * If the tracepoint has already been enabled, just add our id to the
786 * list of interested probes. This may be our second time through
787 * this path in which case we'll have constructed the tracepoint we'd
788 * like to install. If we can't find a match, and have an allocated
789 * tracepoint ready to go, enable that one now.
790 *
791 * A tracepoint whose process is defunct is also considered defunct.
792 */
793 again:
794 mutex_enter(&bucket->ftb_mtx);
795 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
796 /*
797 * Note that it's safe to access the active count on the
798 * associated proc structure because we know that at least one
799 * provider (this one) will still be around throughout this
800 * operation.
801 */
802 if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
803 tp->ftt_proc->ftpc_acount == 0)
804 continue;
805
806 /*
807 * Now that we've found a matching tracepoint, it would be
808 * a decent idea to confirm that the tracepoint is still
809 * enabled and the trap instruction hasn't been overwritten.
810 * Since this is a little hairy, we'll punt for now.
811 */
812
813 /*
814 * This can't be the first interested probe. We don't have
815 * to worry about another thread being in the midst of
816 * deleting this tracepoint (which would be the only valid
817 * reason for a tracepoint to have no interested probes)
818 * since we're holding P_PR_LOCK for this process.
819 */
820 ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);
821
822 switch (id->fti_ptype) {
823 case DTFTP_ENTRY:
824 case DTFTP_OFFSETS:
825 case DTFTP_IS_ENABLED:
826 id->fti_next = tp->ftt_ids;
827 membar_producer();
828 tp->ftt_ids = id;
829 membar_producer();
830 break;
831
832 case DTFTP_RETURN:
833 case DTFTP_POST_OFFSETS:
834 id->fti_next = tp->ftt_retids;
835 membar_producer();
836 tp->ftt_retids = id;
837 membar_producer();
838 break;
839
840 default:
841 ASSERT(0);
842 }
843
844 mutex_exit(&bucket->ftb_mtx);
845
846 if (new_tp != NULL) {
847 new_tp->ftt_ids = NULL;
848 new_tp->ftt_retids = NULL;
849 }
850
851 return (0);
852 }
853
854 /*
855 * If we have a good tracepoint ready to go, install it now while
856 * we have the lock held and no one can screw with us.
857 */
858 if (new_tp != NULL) {
859 int rc = 0;
860
861 new_tp->ftt_next = bucket->ftb_data;
862 membar_producer();
863 bucket->ftb_data = new_tp;
864 membar_producer();
865 mutex_exit(&bucket->ftb_mtx);
866
867 /*
868 * Activate the tracepoint in the ISA-specific manner.
869 * If this fails, we need to report the failure, but
870 * indicate that this tracepoint must still be disabled
871 * by calling fasttrap_tracepoint_disable().
872 */
873 if (fasttrap_tracepoint_install(p, new_tp) != 0)
874 rc = FASTTRAP_ENABLE_PARTIAL;
875
876 /*
877 * Increment the count of the number of tracepoints active in
878 * the victim process.
879 */
880 #ifdef illumos
881 ASSERT(p->p_proc_flag & P_PR_LOCK);
882 #endif
883 p->p_dtrace_count++;
884
885 return (rc);
886 }
887
888 mutex_exit(&bucket->ftb_mtx);
889
890 /*
891 * Initialize the tracepoint that's been preallocated with the probe.
892 */
893 new_tp = probe->ftp_tps[index].fit_tp;
894
895 ASSERT(new_tp->ftt_pid == pid);
896 ASSERT(new_tp->ftt_pc == pc);
897 ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
898 ASSERT(new_tp->ftt_ids == NULL);
899 ASSERT(new_tp->ftt_retids == NULL);
900
901 switch (id->fti_ptype) {
902 case DTFTP_ENTRY:
903 case DTFTP_OFFSETS:
904 case DTFTP_IS_ENABLED:
905 id->fti_next = NULL;
906 new_tp->ftt_ids = id;
907 break;
908
909 case DTFTP_RETURN:
910 case DTFTP_POST_OFFSETS:
911 id->fti_next = NULL;
912 new_tp->ftt_retids = id;
913 break;
914
915 default:
916 ASSERT(0);
917 }
918
919 #ifdef __FreeBSD__
920 if (SV_PROC_FLAG(p, SV_LP64))
921 p->p_model = DATAMODEL_LP64;
922 else
923 p->p_model = DATAMODEL_ILP32;
924 #endif
925
926 /*
927 * If the ISA-dependent initialization goes to plan, go back to the
928 * beginning and try to install this freshly made tracepoint.
929 */
930 if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
931 goto again;
932
933 new_tp->ftt_ids = NULL;
934 new_tp->ftt_retids = NULL;
935
936 return (FASTTRAP_ENABLE_FAIL);
937 }
938
939 static void
940 fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
941 {
942 fasttrap_bucket_t *bucket;
943 fasttrap_provider_t *provider = probe->ftp_prov;
944 fasttrap_tracepoint_t **pp, *tp;
945 fasttrap_id_t *id, **idp = NULL;
946 pid_t pid;
947 uintptr_t pc;
948
949 ASSERT(index < probe->ftp_ntps);
950
951 pid = probe->ftp_pid;
952 pc = probe->ftp_tps[index].fit_tp->ftt_pc;
953 id = &probe->ftp_tps[index].fit_id;
954
955 ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
956
957 /*
958 * Find the tracepoint and make sure that our id is one of the
959 * ones registered with it.
960 */
961 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
962 mutex_enter(&bucket->ftb_mtx);
963 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
964 if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
965 tp->ftt_proc == provider->ftp_proc)
966 break;
967 }
968
969 /*
970 * If we somehow lost this tracepoint, we're in a world of hurt.
971 */
972 ASSERT(tp != NULL);
973
974 switch (id->fti_ptype) {
975 case DTFTP_ENTRY:
976 case DTFTP_OFFSETS:
977 case DTFTP_IS_ENABLED:
978 ASSERT(tp->ftt_ids != NULL);
979 idp = &tp->ftt_ids;
980 break;
981
982 case DTFTP_RETURN:
983 case DTFTP_POST_OFFSETS:
984 ASSERT(tp->ftt_retids != NULL);
985 idp = &tp->ftt_retids;
986 break;
987
988 default:
989 ASSERT(0);
990 }
991
992 while ((*idp)->fti_probe != probe) {
993 idp = &(*idp)->fti_next;
994 ASSERT(*idp != NULL);
995 }
996
997 id = *idp;
998 *idp = id->fti_next;
999 membar_producer();
1000
1001 ASSERT(id->fti_probe == probe);
1002
1003 /*
1004 * If there are other registered enablings of this tracepoint, we're
1005 * all done, but if this was the last probe assocated with this
1006 * this tracepoint, we need to remove and free it.
1007 */
1008 if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {
1009
1010 /*
1011 * If the current probe's tracepoint is in use, swap it
1012 * for an unused tracepoint.
1013 */
1014 if (tp == probe->ftp_tps[index].fit_tp) {
1015 fasttrap_probe_t *tmp_probe;
1016 fasttrap_tracepoint_t **tmp_tp;
1017 uint_t tmp_index;
1018
1019 if (tp->ftt_ids != NULL) {
1020 tmp_probe = tp->ftt_ids->fti_probe;
1021 /* LINTED - alignment */
1022 tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
1023 tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
1024 } else {
1025 tmp_probe = tp->ftt_retids->fti_probe;
1026 /* LINTED - alignment */
1027 tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
1028 tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
1029 }
1030
1031 ASSERT(*tmp_tp != NULL);
1032 ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
1033 ASSERT((*tmp_tp)->ftt_ids == NULL);
1034 ASSERT((*tmp_tp)->ftt_retids == NULL);
1035
1036 probe->ftp_tps[index].fit_tp = *tmp_tp;
1037 *tmp_tp = tp;
1038 }
1039
1040 mutex_exit(&bucket->ftb_mtx);
1041
1042 /*
1043 * Tag the modified probe with the generation in which it was
1044 * changed.
1045 */
1046 probe->ftp_gen = fasttrap_mod_gen;
1047 return;
1048 }
1049
1050 mutex_exit(&bucket->ftb_mtx);
1051
1052 /*
1053 * We can't safely remove the tracepoint from the set of active
1054 * tracepoints until we've actually removed the fasttrap instruction
1055 * from the process's text. We can, however, operate on this
1056 * tracepoint secure in the knowledge that no other thread is going to
1057 * be looking at it since we hold P_PR_LOCK on the process if it's
1058 * live or we hold the provider lock on the process if it's dead and
1059 * gone.
1060 */
1061
1062 /*
1063 * We only need to remove the actual instruction if we're looking
1064 * at an existing process
1065 */
1066 if (p != NULL) {
1067 /*
1068 * If we fail to restore the instruction we need to kill
1069 * this process since it's in a completely unrecoverable
1070 * state.
1071 */
1072 if (fasttrap_tracepoint_remove(p, tp) != 0)
1073 fasttrap_sigtrap(p, NULL, pc);
1074
1075 /*
1076 * Decrement the count of the number of tracepoints active
1077 * in the victim process.
1078 */
1079 #ifdef illumos
1080 ASSERT(p->p_proc_flag & P_PR_LOCK);
1081 #endif
1082 p->p_dtrace_count--;
1083
1084 atomic_add_rel_64(&p->p_fasttrap_tp_gen, 1);
1085 }
1086
1087 /*
1088 * Remove the probe from the hash table of active tracepoints.
1089 */
1090 mutex_enter(&bucket->ftb_mtx);
1091 pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
1092 ASSERT(*pp != NULL);
1093 while (*pp != tp) {
1094 pp = &(*pp)->ftt_next;
1095 ASSERT(*pp != NULL);
1096 }
1097
1098 *pp = tp->ftt_next;
1099 membar_producer();
1100
1101 mutex_exit(&bucket->ftb_mtx);
1102
1103 /*
1104 * Tag the modified probe with the generation in which it was changed.
1105 */
1106 probe->ftp_gen = fasttrap_mod_gen;
1107 }
1108
1109 static void
1110 fasttrap_enable_callbacks(void)
1111 {
1112 /*
1113 * We don't have to play the rw lock game here because we're
1114 * providing something rather than taking something away --
1115 * we can be sure that no threads have tried to follow this
1116 * function pointer yet.
1117 */
1118 mutex_enter(&fasttrap_count_mtx);
1119 if (fasttrap_pid_count == 0) {
1120 ASSERT(dtrace_pid_probe_ptr == NULL);
1121 ASSERT(dtrace_return_probe_ptr == NULL);
1122 dtrace_pid_probe_ptr = &fasttrap_pid_probe;
1123 dtrace_return_probe_ptr = &fasttrap_return_probe;
1124 }
1125 ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
1126 ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
1127 fasttrap_pid_count++;
1128 mutex_exit(&fasttrap_count_mtx);
1129 }
1130
1131 static void
1132 fasttrap_disable_callbacks(void)
1133 {
1134 mutex_enter(&fasttrap_count_mtx);
1135 ASSERT(fasttrap_pid_count > 0);
1136 fasttrap_pid_count--;
1137 if (fasttrap_pid_count == 0) {
1138 /*
1139 * Synchronize with the breakpoint handler, which is careful to
1140 * enable interrupts only after loading the hook pointer.
1141 */
1142 dtrace_sync();
1143 dtrace_pid_probe_ptr = NULL;
1144 dtrace_return_probe_ptr = NULL;
1145 }
1146 mutex_exit(&fasttrap_count_mtx);
1147 }
1148
1149 /*ARGSUSED*/
1150 static void
1151 fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
1152 {
1153 fasttrap_probe_t *probe = parg;
1154 proc_t *p = NULL;
1155 int i, rc;
1156
1157 ASSERT(probe != NULL);
1158 ASSERT(!probe->ftp_enabled);
1159 ASSERT(id == probe->ftp_id);
1160 #ifdef illumos
1161 ASSERT(MUTEX_HELD(&cpu_lock));
1162 #endif
1163
1164 /*
1165 * Increment the count of enabled probes on this probe's provider;
1166 * the provider can't go away while the probe still exists. We
1167 * must increment this even if we aren't able to properly enable
1168 * this probe.
1169 */
1170 mutex_enter(&probe->ftp_prov->ftp_mtx);
1171 probe->ftp_prov->ftp_rcount++;
1172 mutex_exit(&probe->ftp_prov->ftp_mtx);
1173
1174 /*
1175 * If this probe's provider is retired (meaning it was valid in a
1176 * previously exec'ed incarnation of this address space), bail out. The
1177 * provider can't go away while we're in this code path.
1178 */
1179 if (probe->ftp_prov->ftp_retired)
1180 return;
1181
1182 /*
1183 * If we can't find the process, it may be that we're in the context of
1184 * a fork in which the traced process is being born and we're copying
1185 * USDT probes. Otherwise, the process is gone so bail.
1186 */
1187 #ifdef illumos
1188 if ((p = sprlock(probe->ftp_pid)) == NULL) {
1189 if ((curproc->p_flag & SFORKING) == 0)
1190 return;
1191
1192 mutex_enter(&pidlock);
1193 p = prfind(probe->ftp_pid);
1194
1195 if (p == NULL) {
1196 /*
1197 * So it's not that the target process is being born,
1198 * it's that it isn't there at all (and we simply
1199 * happen to be forking). Anyway, we know that the
1200 * target is definitely gone, so bail out.
1201 */
1202 mutex_exit(&pidlock);
1203 return (0);
1204 }
1205
1206 /*
1207 * Confirm that curproc is indeed forking the process in which
1208 * we're trying to enable probes.
1209 */
1210 ASSERT(p->p_parent == curproc);
1211 ASSERT(p->p_stat == SIDL);
1212
1213 mutex_enter(&p->p_lock);
1214 mutex_exit(&pidlock);
1215
1216 sprlock_proc(p);
1217 }
1218
1219 ASSERT(!(p->p_flag & SVFORK));
1220 mutex_exit(&p->p_lock);
1221 #else
1222 if (pget(probe->ftp_pid, PGET_HOLD | PGET_NOTWEXIT, &p) != 0)
1223 return;
1224 #endif
1225
1226 /*
1227 * We have to enable the trap entry point before any user threads have
1228 * the chance to execute the trap instruction we're about to place
1229 * in their process's text.
1230 */
1231 fasttrap_enable_callbacks();
1232
1233 /*
1234 * Enable all the tracepoints and add this probe's id to each
1235 * tracepoint's list of active probes.
1236 */
1237 for (i = 0; i < probe->ftp_ntps; i++) {
1238 if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
1239 /*
1240 * If enabling the tracepoint failed completely,
1241 * we don't have to disable it; if the failure
1242 * was only partial we must disable it.
1243 */
1244 if (rc == FASTTRAP_ENABLE_FAIL)
1245 i--;
1246 else
1247 ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);
1248
1249 /*
1250 * Back up and pull out all the tracepoints we've
1251 * created so far for this probe.
1252 */
1253 while (i >= 0) {
1254 fasttrap_tracepoint_disable(p, probe, i);
1255 i--;
1256 }
1257
1258 #ifdef illumos
1259 mutex_enter(&p->p_lock);
1260 sprunlock(p);
1261 #else
1262 PRELE(p);
1263 #endif
1264
1265 /*
1266 * Since we're not actually enabling this probe,
1267 * drop our reference on the trap table entry.
1268 */
1269 fasttrap_disable_callbacks();
1270 return;
1271 }
1272 }
1273 #ifdef illumos
1274 mutex_enter(&p->p_lock);
1275 sprunlock(p);
1276 #else
1277 PRELE(p);
1278 #endif
1279
1280 probe->ftp_enabled = 1;
1281 }
1282
1283 /*ARGSUSED*/
1284 static void
1285 fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
1286 {
1287 fasttrap_probe_t *probe = parg;
1288 fasttrap_provider_t *provider = probe->ftp_prov;
1289 proc_t *p;
1290 int i, whack = 0;
1291
1292 ASSERT(id == probe->ftp_id);
1293
1294 mutex_enter(&provider->ftp_mtx);
1295
1296 /*
1297 * We won't be able to acquire a /proc-esque lock on the process
1298 * iff the process is dead and gone. In this case, we rely on the
1299 * provider lock as a point of mutual exclusion to prevent other
1300 * DTrace consumers from disabling this probe.
1301 */
1302 if (pget(probe->ftp_pid, PGET_HOLD | PGET_NOTWEXIT, &p) != 0)
1303 p = NULL;
1304
1305 /*
1306 * Disable all the associated tracepoints (for fully enabled probes).
1307 */
1308 if (probe->ftp_enabled) {
1309 for (i = 0; i < probe->ftp_ntps; i++) {
1310 fasttrap_tracepoint_disable(p, probe, i);
1311 }
1312 }
1313
1314 ASSERT(provider->ftp_rcount > 0);
1315 provider->ftp_rcount--;
1316
1317 if (p != NULL) {
1318 /*
1319 * Even though we may not be able to remove it entirely, we
1320 * mark this retired provider to get a chance to remove some
1321 * of the associated probes.
1322 */
1323 if (provider->ftp_retired && !provider->ftp_marked)
1324 whack = provider->ftp_marked = 1;
1325 mutex_exit(&provider->ftp_mtx);
1326 } else {
1327 /*
1328 * If the process is dead, we're just waiting for the
1329 * last probe to be disabled to be able to free it.
1330 */
1331 if (provider->ftp_rcount == 0 && !provider->ftp_marked)
1332 whack = provider->ftp_marked = 1;
1333 mutex_exit(&provider->ftp_mtx);
1334 }
1335
1336 if (whack)
1337 fasttrap_pid_cleanup();
1338
1339 #ifdef __FreeBSD__
1340 if (p != NULL)
1341 PRELE(p);
1342 #endif
1343 if (!probe->ftp_enabled)
1344 return;
1345
1346 probe->ftp_enabled = 0;
1347
1348 #ifdef illumos
1349 ASSERT(MUTEX_HELD(&cpu_lock));
1350 #endif
1351 fasttrap_disable_callbacks();
1352 }
1353
1354 /*ARGSUSED*/
1355 static void
1356 fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
1357 dtrace_argdesc_t *desc)
1358 {
1359 fasttrap_probe_t *probe = parg;
1360 char *str;
1361 int i, ndx;
1362
1363 desc->dtargd_native[0] = '\0';
1364 desc->dtargd_xlate[0] = '\0';
1365
1366 if (probe->ftp_prov->ftp_retired != 0 ||
1367 desc->dtargd_ndx >= probe->ftp_nargs) {
1368 desc->dtargd_ndx = DTRACE_ARGNONE;
1369 return;
1370 }
1371
1372 ndx = (probe->ftp_argmap != NULL) ?
1373 probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;
1374
1375 str = probe->ftp_ntypes;
1376 for (i = 0; i < ndx; i++) {
1377 str += strlen(str) + 1;
1378 }
1379
1380 ASSERT(strlen(str + 1) < sizeof (desc->dtargd_native));
1381 (void) strcpy(desc->dtargd_native, str);
1382
1383 if (probe->ftp_xtypes == NULL)
1384 return;
1385
1386 str = probe->ftp_xtypes;
1387 for (i = 0; i < desc->dtargd_ndx; i++) {
1388 str += strlen(str) + 1;
1389 }
1390
1391 ASSERT(strlen(str + 1) < sizeof (desc->dtargd_xlate));
1392 (void) strcpy(desc->dtargd_xlate, str);
1393 }
1394
1395 /*ARGSUSED*/
1396 static void
1397 fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
1398 {
1399 fasttrap_probe_t *probe = parg;
1400 int i;
1401 size_t size;
1402
1403 ASSERT(probe != NULL);
1404 ASSERT(!probe->ftp_enabled);
1405 ASSERT(fasttrap_total >= probe->ftp_ntps);
1406
1407 atomic_add_32(&fasttrap_total, -probe->ftp_ntps);
1408 size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
1409
1410 if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
1411 fasttrap_mod_barrier(probe->ftp_gen);
1412
1413 for (i = 0; i < probe->ftp_ntps; i++) {
1414 kmem_free(probe->ftp_tps[i].fit_tp,
1415 sizeof (fasttrap_tracepoint_t));
1416 }
1417
1418 kmem_free(probe, size);
1419 }
1420
1421
1422 static const dtrace_pattr_t pid_attr = {
1423 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1424 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1425 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1426 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1427 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1428 };
1429
1430 static dtrace_pops_t pid_pops = {
1431 .dtps_provide = fasttrap_pid_provide,
1432 .dtps_provide_module = NULL,
1433 .dtps_enable = fasttrap_pid_enable,
1434 .dtps_disable = fasttrap_pid_disable,
1435 .dtps_suspend = NULL,
1436 .dtps_resume = NULL,
1437 .dtps_getargdesc = fasttrap_pid_getargdesc,
1438 .dtps_getargval = fasttrap_pid_getarg,
1439 .dtps_usermode = NULL,
1440 .dtps_destroy = fasttrap_pid_destroy
1441 };
1442
1443 static dtrace_pops_t usdt_pops = {
1444 .dtps_provide = fasttrap_pid_provide,
1445 .dtps_provide_module = NULL,
1446 .dtps_enable = fasttrap_pid_enable,
1447 .dtps_disable = fasttrap_pid_disable,
1448 .dtps_suspend = NULL,
1449 .dtps_resume = NULL,
1450 .dtps_getargdesc = fasttrap_pid_getargdesc,
1451 .dtps_getargval = fasttrap_usdt_getarg,
1452 .dtps_usermode = NULL,
1453 .dtps_destroy = fasttrap_pid_destroy
1454 };
1455
1456 static fasttrap_proc_t *
1457 fasttrap_proc_lookup(pid_t pid)
1458 {
1459 fasttrap_bucket_t *bucket;
1460 fasttrap_proc_t *fprc, *new_fprc;
1461
1462
1463 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1464 mutex_enter(&bucket->ftb_mtx);
1465
1466 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1467 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1468 mutex_enter(&fprc->ftpc_mtx);
1469 mutex_exit(&bucket->ftb_mtx);
1470 fprc->ftpc_rcount++;
1471 atomic_inc_64(&fprc->ftpc_acount);
1472 ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1473 mutex_exit(&fprc->ftpc_mtx);
1474
1475 return (fprc);
1476 }
1477 }
1478
1479 /*
1480 * Drop the bucket lock so we don't try to perform a sleeping
1481 * allocation under it.
1482 */
1483 mutex_exit(&bucket->ftb_mtx);
1484
1485 new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
1486 new_fprc->ftpc_pid = pid;
1487 new_fprc->ftpc_rcount = 1;
1488 new_fprc->ftpc_acount = 1;
1489 #ifndef illumos
1490 mutex_init(&new_fprc->ftpc_mtx, "fasttrap proc mtx", MUTEX_DEFAULT,
1491 NULL);
1492 #endif
1493
1494 mutex_enter(&bucket->ftb_mtx);
1495
1496 /*
1497 * Take another lap through the list to make sure a proc hasn't
1498 * been created for this pid while we weren't under the bucket lock.
1499 */
1500 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1501 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1502 mutex_enter(&fprc->ftpc_mtx);
1503 mutex_exit(&bucket->ftb_mtx);
1504 fprc->ftpc_rcount++;
1505 atomic_inc_64(&fprc->ftpc_acount);
1506 ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1507 mutex_exit(&fprc->ftpc_mtx);
1508
1509 kmem_free(new_fprc, sizeof (fasttrap_proc_t));
1510
1511 return (fprc);
1512 }
1513 }
1514
1515 new_fprc->ftpc_next = bucket->ftb_data;
1516 bucket->ftb_data = new_fprc;
1517
1518 mutex_exit(&bucket->ftb_mtx);
1519
1520 return (new_fprc);
1521 }
1522
1523 static void
1524 fasttrap_proc_release(fasttrap_proc_t *proc)
1525 {
1526 fasttrap_bucket_t *bucket;
1527 fasttrap_proc_t *fprc, **fprcp;
1528 pid_t pid = proc->ftpc_pid;
1529 #ifndef illumos
1530 fasttrap_scrblock_t *scrblk, *scrblktmp;
1531 fasttrap_scrspace_t *scrspc, *scrspctmp;
1532 struct proc *p;
1533 struct thread *td;
1534 #endif
1535
1536 mutex_enter(&proc->ftpc_mtx);
1537
1538 ASSERT(proc->ftpc_rcount != 0);
1539 ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);
1540
1541 if (--proc->ftpc_rcount != 0) {
1542 mutex_exit(&proc->ftpc_mtx);
1543 return;
1544 }
1545
1546 #ifndef illumos
1547 /*
1548 * Free all structures used to manage per-thread scratch space.
1549 */
1550 LIST_FOREACH_SAFE(scrblk, &proc->ftpc_scrblks, ftsb_next,
1551 scrblktmp) {
1552 LIST_REMOVE(scrblk, ftsb_next);
1553 free(scrblk, M_SOLARIS);
1554 }
1555 LIST_FOREACH_SAFE(scrspc, &proc->ftpc_fscr, ftss_next, scrspctmp) {
1556 LIST_REMOVE(scrspc, ftss_next);
1557 free(scrspc, M_SOLARIS);
1558 }
1559 LIST_FOREACH_SAFE(scrspc, &proc->ftpc_ascr, ftss_next, scrspctmp) {
1560 LIST_REMOVE(scrspc, ftss_next);
1561 free(scrspc, M_SOLARIS);
1562 }
1563
1564 if ((p = pfind(pid)) != NULL) {
1565 FOREACH_THREAD_IN_PROC(p, td)
1566 td->t_dtrace_sscr = NULL;
1567 PROC_UNLOCK(p);
1568 }
1569 #endif
1570
1571 mutex_exit(&proc->ftpc_mtx);
1572
1573 /*
1574 * There should definitely be no live providers associated with this
1575 * process at this point.
1576 */
1577 ASSERT(proc->ftpc_acount == 0);
1578
1579 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1580 mutex_enter(&bucket->ftb_mtx);
1581
1582 fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
1583 while ((fprc = *fprcp) != NULL) {
1584 if (fprc == proc)
1585 break;
1586
1587 fprcp = &fprc->ftpc_next;
1588 }
1589
1590 /*
1591 * Something strange has happened if we can't find the proc.
1592 */
1593 ASSERT(fprc != NULL);
1594
1595 *fprcp = fprc->ftpc_next;
1596
1597 mutex_exit(&bucket->ftb_mtx);
1598
1599 kmem_free(fprc, sizeof (fasttrap_proc_t));
1600 }
1601
1602 /*
1603 * Lookup a fasttrap-managed provider based on its name and associated pid.
1604 * If the pattr argument is non-NULL, this function instantiates the provider
1605 * if it doesn't exist otherwise it returns NULL. The provider is returned
1606 * with its lock held.
1607 */
1608 static fasttrap_provider_t *
1609 fasttrap_provider_lookup(pid_t pid, const char *name,
1610 const dtrace_pattr_t *pattr)
1611 {
1612 fasttrap_provider_t *fp, *new_fp = NULL;
1613 fasttrap_bucket_t *bucket;
1614 char provname[DTRACE_PROVNAMELEN];
1615 proc_t *p;
1616 cred_t *cred;
1617
1618 ASSERT(strlen(name) < sizeof (fp->ftp_name));
1619 ASSERT(pattr != NULL);
1620
1621 bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
1622 mutex_enter(&bucket->ftb_mtx);
1623
1624 /*
1625 * Take a lap through the list and return the match if we find it.
1626 */
1627 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1628 if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
1629 !fp->ftp_retired) {
1630 mutex_enter(&fp->ftp_mtx);
1631 mutex_exit(&bucket->ftb_mtx);
1632 return (fp);
1633 }
1634 }
1635
1636 /*
1637 * Drop the bucket lock so we don't try to perform a sleeping
1638 * allocation under it.
1639 */
1640 mutex_exit(&bucket->ftb_mtx);
1641
1642 /*
1643 * Make sure the process exists, isn't a child created as the result
1644 * of a vfork(2), and isn't a zombie (but may be in fork).
1645 */
1646 if ((p = pfind(pid)) == NULL)
1647 return (NULL);
1648
1649 /*
1650 * Increment p_dtrace_probes so that the process knows to inform us
1651 * when it exits or execs. fasttrap_provider_free() decrements this
1652 * when we're done with this provider.
1653 */
1654 p->p_dtrace_probes++;
1655
1656 /*
1657 * Grab the credentials for this process so we have
1658 * something to pass to dtrace_register().
1659 */
1660 PROC_LOCK_ASSERT(p, MA_OWNED);
1661 crhold(p->p_ucred);
1662 cred = p->p_ucred;
1663 PROC_UNLOCK(p);
1664
1665 new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
1666 new_fp->ftp_pid = pid;
1667 new_fp->ftp_proc = fasttrap_proc_lookup(pid);
1668 #ifndef illumos
1669 mutex_init(&new_fp->ftp_mtx, "provider mtx", MUTEX_DEFAULT, NULL);
1670 mutex_init(&new_fp->ftp_cmtx, "lock on creating", MUTEX_DEFAULT, NULL);
1671 #endif
1672
1673 ASSERT(new_fp->ftp_proc != NULL);
1674
1675 mutex_enter(&bucket->ftb_mtx);
1676
1677 /*
1678 * Take another lap through the list to make sure a provider hasn't
1679 * been created for this pid while we weren't under the bucket lock.
1680 */
1681 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1682 if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
1683 !fp->ftp_retired) {
1684 mutex_enter(&fp->ftp_mtx);
1685 mutex_exit(&bucket->ftb_mtx);
1686 fasttrap_provider_free(new_fp);
1687 crfree(cred);
1688 return (fp);
1689 }
1690 }
1691
1692 (void) strcpy(new_fp->ftp_name, name);
1693
1694 /*
1695 * Fail and return NULL if either the provider name is too long
1696 * or we fail to register this new provider with the DTrace
1697 * framework. Note that this is the only place we ever construct
1698 * the full provider name -- we keep it in pieces in the provider
1699 * structure.
1700 */
1701 if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
1702 sizeof (provname) ||
1703 dtrace_register(provname, pattr,
1704 DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
1705 pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
1706 &new_fp->ftp_provid) != 0) {
1707 mutex_exit(&bucket->ftb_mtx);
1708 fasttrap_provider_free(new_fp);
1709 crfree(cred);
1710 return (NULL);
1711 }
1712
1713 new_fp->ftp_next = bucket->ftb_data;
1714 bucket->ftb_data = new_fp;
1715
1716 mutex_enter(&new_fp->ftp_mtx);
1717 mutex_exit(&bucket->ftb_mtx);
1718
1719 crfree(cred);
1720 return (new_fp);
1721 }
1722
1723 static void
1724 fasttrap_provider_free(fasttrap_provider_t *provider)
1725 {
1726 pid_t pid = provider->ftp_pid;
1727 proc_t *p;
1728
1729 /*
1730 * There need to be no associated enabled probes, no consumers
1731 * creating probes, and no meta providers referencing this provider.
1732 */
1733 ASSERT(provider->ftp_rcount == 0);
1734 ASSERT(provider->ftp_ccount == 0);
1735 ASSERT(provider->ftp_mcount == 0);
1736
1737 /*
1738 * If this provider hasn't been retired, we need to explicitly drop the
1739 * count of active providers on the associated process structure.
1740 */
1741 if (!provider->ftp_retired) {
1742 atomic_dec_64(&provider->ftp_proc->ftpc_acount);
1743 ASSERT(provider->ftp_proc->ftpc_acount <
1744 provider->ftp_proc->ftpc_rcount);
1745 }
1746
1747 fasttrap_proc_release(provider->ftp_proc);
1748
1749 #ifndef illumos
1750 mutex_destroy(&provider->ftp_mtx);
1751 mutex_destroy(&provider->ftp_cmtx);
1752 #endif
1753 kmem_free(provider, sizeof (fasttrap_provider_t));
1754
1755 /*
1756 * Decrement p_dtrace_probes on the process whose provider we're
1757 * freeing. We don't have to worry about clobbering somone else's
1758 * modifications to it because we have locked the bucket that
1759 * corresponds to this process's hash chain in the provider hash
1760 * table. Don't sweat it if we can't find the process.
1761 */
1762 if ((p = pfind(pid)) == NULL) {
1763 return;
1764 }
1765
1766 p->p_dtrace_probes--;
1767 #ifndef illumos
1768 PROC_UNLOCK(p);
1769 #endif
1770 }
1771
1772 static void
1773 fasttrap_provider_retire(pid_t pid, const char *name, int mprov)
1774 {
1775 fasttrap_provider_t *fp;
1776 fasttrap_bucket_t *bucket;
1777 dtrace_provider_id_t provid;
1778
1779 ASSERT(strlen(name) < sizeof (fp->ftp_name));
1780
1781 bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
1782 mutex_enter(&bucket->ftb_mtx);
1783
1784 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1785 if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
1786 !fp->ftp_retired)
1787 break;
1788 }
1789
1790 if (fp == NULL) {
1791 mutex_exit(&bucket->ftb_mtx);
1792 return;
1793 }
1794
1795 mutex_enter(&fp->ftp_mtx);
1796 ASSERT(!mprov || fp->ftp_mcount > 0);
1797 if (mprov && --fp->ftp_mcount != 0) {
1798 mutex_exit(&fp->ftp_mtx);
1799 mutex_exit(&bucket->ftb_mtx);
1800 return;
1801 }
1802
1803 /*
1804 * Mark the provider to be removed in our post-processing step, mark it
1805 * retired, and drop the active count on its proc. Marking it indicates
1806 * that we should try to remove it; setting the retired flag indicates
1807 * that we're done with this provider; dropping the active the proc
1808 * releases our hold, and when this reaches zero (as it will during
1809 * exit or exec) the proc and associated providers become defunct.
1810 *
1811 * We obviously need to take the bucket lock before the provider lock
1812 * to perform the lookup, but we need to drop the provider lock
1813 * before calling into the DTrace framework since we acquire the
1814 * provider lock in callbacks invoked from the DTrace framework. The
1815 * bucket lock therefore protects the integrity of the provider hash
1816 * table.
1817 */
1818 atomic_dec_64(&fp->ftp_proc->ftpc_acount);
1819 ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);
1820
1821 fp->ftp_retired = 1;
1822 fp->ftp_marked = 1;
1823 provid = fp->ftp_provid;
1824 mutex_exit(&fp->ftp_mtx);
1825
1826 /*
1827 * We don't have to worry about invalidating the same provider twice
1828 * since fasttrap_provider_lookup() will ignore provider that have
1829 * been marked as retired.
1830 */
1831 dtrace_invalidate(provid);
1832
1833 mutex_exit(&bucket->ftb_mtx);
1834
1835 fasttrap_pid_cleanup();
1836 }
1837
1838 static int
1839 fasttrap_uint32_cmp(const void *ap, const void *bp)
1840 {
1841 return (*(const uint32_t *)ap - *(const uint32_t *)bp);
1842 }
1843
1844 static int
1845 fasttrap_uint64_cmp(const void *ap, const void *bp)
1846 {
1847 return (*(const uint64_t *)ap - *(const uint64_t *)bp);
1848 }
1849
1850 static int
1851 fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
1852 {
1853 fasttrap_provider_t *provider;
1854 fasttrap_probe_t *pp;
1855 fasttrap_tracepoint_t *tp;
1856 char *name;
1857 int i, aframes = 0, whack;
1858
1859 /*
1860 * There needs to be at least one desired trace point.
1861 */
1862 if (pdata->ftps_noffs == 0)
1863 return (EINVAL);
1864
1865 switch (pdata->ftps_type) {
1866 case DTFTP_ENTRY:
1867 name = "entry";
1868 aframes = FASTTRAP_ENTRY_AFRAMES;
1869 break;
1870 case DTFTP_RETURN:
1871 name = "return";
1872 aframes = FASTTRAP_RETURN_AFRAMES;
1873 break;
1874 case DTFTP_OFFSETS:
1875 name = NULL;
1876 break;
1877 default:
1878 return (EINVAL);
1879 }
1880
1881 if ((provider = fasttrap_provider_lookup(pdata->ftps_pid,
1882 FASTTRAP_PID_NAME, &pid_attr)) == NULL)
1883 return (ESRCH);
1884
1885 /*
1886 * Increment this reference count to indicate that a consumer is
1887 * actively adding a new probe associated with this provider. This
1888 * prevents the provider from being deleted -- we'll need to check
1889 * for pending deletions when we drop this reference count.
1890 */
1891 provider->ftp_ccount++;
1892 mutex_exit(&provider->ftp_mtx);
1893
1894 /*
1895 * Grab the creation lock to ensure consistency between calls to
1896 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
1897 * other threads creating probes. We must drop the provider lock
1898 * before taking this lock to avoid a three-way deadlock with the
1899 * DTrace framework.
1900 */
1901 mutex_enter(&provider->ftp_cmtx);
1902
1903 if (name == NULL) {
1904 for (i = 0; i < pdata->ftps_noffs; i++) {
1905 char name_str[17];
1906
1907 (void) sprintf(name_str, "%llx",
1908 (unsigned long long)pdata->ftps_offs[i]);
1909
1910 if (dtrace_probe_lookup(provider->ftp_provid,
1911 pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
1912 continue;
1913
1914 atomic_inc_32(&fasttrap_total);
1915
1916 if (fasttrap_total > fasttrap_max) {
1917 atomic_dec_32(&fasttrap_total);
1918 goto no_mem;
1919 }
1920
1921 pp = kmem_zalloc(sizeof (fasttrap_probe_t), KM_SLEEP);
1922
1923 pp->ftp_prov = provider;
1924 pp->ftp_faddr = pdata->ftps_pc;
1925 pp->ftp_fsize = pdata->ftps_size;
1926 pp->ftp_pid = pdata->ftps_pid;
1927 pp->ftp_ntps = 1;
1928
1929 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
1930 KM_SLEEP);
1931
1932 tp->ftt_proc = provider->ftp_proc;
1933 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1934 tp->ftt_pid = pdata->ftps_pid;
1935
1936 pp->ftp_tps[0].fit_tp = tp;
1937 pp->ftp_tps[0].fit_id.fti_probe = pp;
1938 pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_type;
1939
1940 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1941 pdata->ftps_mod, pdata->ftps_func, name_str,
1942 FASTTRAP_OFFSET_AFRAMES, pp);
1943 }
1944
1945 } else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
1946 pdata->ftps_func, name) == 0) {
1947 atomic_add_32(&fasttrap_total, pdata->ftps_noffs);
1948
1949 if (fasttrap_total > fasttrap_max) {
1950 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
1951 goto no_mem;
1952 }
1953
1954 /*
1955 * Make sure all tracepoint program counter values are unique.
1956 * We later assume that each probe has exactly one tracepoint
1957 * for a given pc.
1958 */
1959 qsort(pdata->ftps_offs, pdata->ftps_noffs,
1960 sizeof (uint64_t), fasttrap_uint64_cmp);
1961 for (i = 1; i < pdata->ftps_noffs; i++) {
1962 if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
1963 continue;
1964
1965 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
1966 goto no_mem;
1967 }
1968
1969 ASSERT(pdata->ftps_noffs > 0);
1970 pp = kmem_zalloc(offsetof(fasttrap_probe_t,
1971 ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
1972
1973 pp->ftp_prov = provider;
1974 pp->ftp_faddr = pdata->ftps_pc;
1975 pp->ftp_fsize = pdata->ftps_size;
1976 pp->ftp_pid = pdata->ftps_pid;
1977 pp->ftp_ntps = pdata->ftps_noffs;
1978
1979 for (i = 0; i < pdata->ftps_noffs; i++) {
1980 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
1981 KM_SLEEP);
1982
1983 tp->ftt_proc = provider->ftp_proc;
1984 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1985 tp->ftt_pid = pdata->ftps_pid;
1986
1987 pp->ftp_tps[i].fit_tp = tp;
1988 pp->ftp_tps[i].fit_id.fti_probe = pp;
1989 pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_type;
1990 }
1991
1992 pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1993 pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
1994 }
1995
1996 mutex_exit(&provider->ftp_cmtx);
1997
1998 /*
1999 * We know that the provider is still valid since we incremented the
2000 * creation reference count. If someone tried to clean up this provider
2001 * while we were using it (e.g. because the process called exec(2) or
2002 * exit(2)), take note of that and try to clean it up now.
2003 */
2004 mutex_enter(&provider->ftp_mtx);
2005 provider->ftp_ccount--;
2006 whack = provider->ftp_retired;
2007 mutex_exit(&provider->ftp_mtx);
2008
2009 if (whack)
2010 fasttrap_pid_cleanup();
2011
2012 return (0);
2013
2014 no_mem:
2015 /*
2016 * If we've exhausted the allowable resources, we'll try to remove
2017 * this provider to free some up. This is to cover the case where
2018 * the user has accidentally created many more probes than was
2019 * intended (e.g. pid123:::).
2020 */
2021 mutex_exit(&provider->ftp_cmtx);
2022 mutex_enter(&provider->ftp_mtx);
2023 provider->ftp_ccount--;
2024 provider->ftp_marked = 1;
2025 mutex_exit(&provider->ftp_mtx);
2026
2027 fasttrap_pid_cleanup();
2028
2029 return (ENOMEM);
2030 }
2031
2032 /*ARGSUSED*/
2033 static void *
2034 fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
2035 {
2036 fasttrap_provider_t *provider;
2037
2038 /*
2039 * A 32-bit unsigned integer (like a pid for example) can be
2040 * expressed in 10 or fewer decimal digits. Make sure that we'll
2041 * have enough space for the provider name.
2042 */
2043 if (strlen(dhpv->dthpv_provname) + 10 >=
2044 sizeof (provider->ftp_name)) {
2045 printf("failed to instantiate provider %s: "
2046 "name too long to accomodate pid", dhpv->dthpv_provname);
2047 return (NULL);
2048 }
2049
2050 /*
2051 * Don't let folks spoof the true pid provider.
2052 */
2053 if (strcmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME) == 0) {
2054 printf("failed to instantiate provider %s: "
2055 "%s is an invalid name", dhpv->dthpv_provname,
2056 FASTTRAP_PID_NAME);
2057 return (NULL);
2058 }
2059
2060 /*
2061 * The highest stability class that fasttrap supports is ISA; cap
2062 * the stability of the new provider accordingly.
2063 */
2064 if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
2065 dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
2066 if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
2067 dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
2068 if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
2069 dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
2070 if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
2071 dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
2072 if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
2073 dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;
2074
2075 if ((provider = fasttrap_provider_lookup(pid, dhpv->dthpv_provname,
2076 &dhpv->dthpv_pattr)) == NULL) {
2077 printf("failed to instantiate provider %s for "
2078 "process %u", dhpv->dthpv_provname, (uint_t)pid);
2079 return (NULL);
2080 }
2081
2082 /*
2083 * Up the meta provider count so this provider isn't removed until
2084 * the meta provider has been told to remove it.
2085 */
2086 provider->ftp_mcount++;
2087
2088 mutex_exit(&provider->ftp_mtx);
2089
2090 return (provider);
2091 }
2092
2093 /*
2094 * We know a few things about our context here: we know that the probe being
2095 * created doesn't already exist (DTrace won't load DOF at the same address
2096 * twice, even if explicitly told to do so) and we know that we are
2097 * single-threaded with respect to the meta provider machinery. Knowing that
2098 * this is a new probe and that there is no way for us to race with another
2099 * operation on this provider allows us an important optimization: we need not
2100 * lookup a probe before adding it. Saving this lookup is important because
2101 * this code is in the fork path for processes with USDT probes, and lookups
2102 * here are potentially very expensive because of long hash conflicts on
2103 * module, function and name (DTrace doesn't hash on provider name).
2104 */
2105 /*ARGSUSED*/
2106 static void
2107 fasttrap_meta_create_probe(void *arg, void *parg,
2108 dtrace_helper_probedesc_t *dhpb)
2109 {
2110 fasttrap_provider_t *provider = parg;
2111 fasttrap_probe_t *pp;
2112 fasttrap_tracepoint_t *tp;
2113 int i, j;
2114 uint32_t ntps;
2115
2116 /*
2117 * Since the meta provider count is non-zero we don't have to worry
2118 * about this provider disappearing.
2119 */
2120 ASSERT(provider->ftp_mcount > 0);
2121
2122 /*
2123 * The offsets must be unique.
2124 */
2125 qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
2126 fasttrap_uint32_cmp);
2127 for (i = 1; i < dhpb->dthpb_noffs; i++) {
2128 if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
2129 dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
2130 return;
2131 }
2132
2133 qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
2134 fasttrap_uint32_cmp);
2135 for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
2136 if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
2137 dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
2138 return;
2139 }
2140
2141 ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
2142 ASSERT(ntps > 0);
2143
2144 atomic_add_32(&fasttrap_total, ntps);
2145
2146 if (fasttrap_total > fasttrap_max) {
2147 atomic_add_32(&fasttrap_total, -ntps);
2148 return;
2149 }
2150
2151 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
2152
2153 pp->ftp_prov = provider;
2154 pp->ftp_pid = provider->ftp_pid;
2155 pp->ftp_ntps = ntps;
2156 pp->ftp_nargs = dhpb->dthpb_xargc;
2157 pp->ftp_xtypes = dhpb->dthpb_xtypes;
2158 pp->ftp_ntypes = dhpb->dthpb_ntypes;
2159
2160 /*
2161 * First create a tracepoint for each actual point of interest.
2162 */
2163 for (i = 0; i < dhpb->dthpb_noffs; i++) {
2164 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
2165
2166 tp->ftt_proc = provider->ftp_proc;
2167 tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_offs[i];
2168 tp->ftt_pid = provider->ftp_pid;
2169
2170 pp->ftp_tps[i].fit_tp = tp;
2171 pp->ftp_tps[i].fit_id.fti_probe = pp;
2172 #ifdef __sparc
2173 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_POST_OFFSETS;
2174 #else
2175 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
2176 #endif
2177 }
2178
2179 /*
2180 * Then create a tracepoint for each is-enabled point.
2181 */
2182 for (j = 0; i < ntps; i++, j++) {
2183 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
2184
2185 tp->ftt_proc = provider->ftp_proc;
2186 tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_enoffs[j];
2187 tp->ftt_pid = provider->ftp_pid;
2188
2189 pp->ftp_tps[i].fit_tp = tp;
2190 pp->ftp_tps[i].fit_id.fti_probe = pp;
2191 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
2192 }
2193
2194 /*
2195 * If the arguments are shuffled around we set the argument remapping
2196 * table. Later, when the probe fires, we only remap the arguments
2197 * if the table is non-NULL.
2198 */
2199 for (i = 0; i < dhpb->dthpb_xargc; i++) {
2200 if (dhpb->dthpb_args[i] != i) {
2201 pp->ftp_argmap = dhpb->dthpb_args;
2202 break;
2203 }
2204 }
2205
2206 /*
2207 * The probe is fully constructed -- register it with DTrace.
2208 */
2209 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
2210 dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);
2211 }
2212
2213 /*ARGSUSED*/
2214 static void
2215 fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
2216 {
2217 /*
2218 * Clean up the USDT provider. There may be active consumers of the
2219 * provider busy adding probes, no damage will actually befall the
2220 * provider until that count has dropped to zero. This just puts
2221 * the provider on death row.
2222 */
2223 fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1);
2224 }
2225
2226 static dtrace_mops_t fasttrap_mops = {
2227 .dtms_create_probe = fasttrap_meta_create_probe,
2228 .dtms_provide_pid = fasttrap_meta_provide,
2229 .dtms_remove_pid = fasttrap_meta_remove
2230 };
2231
2232 /*ARGSUSED*/
2233 static int
2234 fasttrap_open(struct cdev *dev __unused, int oflags __unused,
2235 int devtype __unused, struct thread *td __unused)
2236 {
2237 return (0);
2238 }
2239
2240 /*ARGSUSED*/
2241 static int
2242 fasttrap_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int fflag,
2243 struct thread *td)
2244 {
2245 if (!dtrace_attached())
2246 return (EAGAIN);
2247
2248 if (cmd == FASTTRAPIOC_MAKEPROBE) {
2249 fasttrap_probe_spec_t *uprobe = *(fasttrap_probe_spec_t **)arg;
2250 fasttrap_probe_spec_t *probe;
2251 uint64_t noffs;
2252 size_t size;
2253 int ret, err;
2254
2255 if (copyin(&uprobe->ftps_noffs, &noffs,
2256 sizeof (uprobe->ftps_noffs)))
2257 return (EFAULT);
2258
2259 /*
2260 * Probes must have at least one tracepoint.
2261 */
2262 if (noffs == 0)
2263 return (EINVAL);
2264
2265 size = sizeof (fasttrap_probe_spec_t) +
2266 sizeof (probe->ftps_offs[0]) * (noffs - 1);
2267
2268 if (size > 1024 * 1024)
2269 return (ENOMEM);
2270
2271 probe = kmem_alloc(size, KM_SLEEP);
2272
2273 if (copyin(uprobe, probe, size) != 0 ||
2274 probe->ftps_noffs != noffs) {
2275 kmem_free(probe, size);
2276 return (EFAULT);
2277 }
2278
2279 /*
2280 * Verify that the function and module strings contain no
2281 * funny characters.
2282 */
2283 if (u8_validate(probe->ftps_func, strlen(probe->ftps_func),
2284 NULL, U8_VALIDATE_ENTIRE, &err) < 0) {
2285 ret = EINVAL;
2286 goto err;
2287 }
2288
2289 if (u8_validate(probe->ftps_mod, strlen(probe->ftps_mod),
2290 NULL, U8_VALIDATE_ENTIRE, &err) < 0) {
2291 ret = EINVAL;
2292 goto err;
2293 }
2294
2295 #ifdef notyet
2296 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2297 proc_t *p;
2298 pid_t pid = probe->ftps_pid;
2299
2300 mutex_enter(&pidlock);
2301 /*
2302 * Report an error if the process doesn't exist
2303 * or is actively being birthed.
2304 */
2305 if ((p = pfind(pid)) == NULL || p->p_stat == SIDL) {
2306 mutex_exit(&pidlock);
2307 return (ESRCH);
2308 }
2309 mutex_enter(&p->p_lock);
2310 mutex_exit(&pidlock);
2311
2312 if ((ret = priv_proc_cred_perm(cr, p, NULL,
2313 VREAD | VWRITE)) != 0) {
2314 mutex_exit(&p->p_lock);
2315 return (ret);
2316 }
2317 mutex_exit(&p->p_lock);
2318 }
2319 #endif /* notyet */
2320
2321 ret = fasttrap_add_probe(probe);
2322 err:
2323 kmem_free(probe, size);
2324
2325 return (ret);
2326
2327 } else if (cmd == FASTTRAPIOC_GETINSTR) {
2328 fasttrap_instr_query_t instr;
2329 fasttrap_tracepoint_t *tp;
2330 uint_t index;
2331 #ifdef notyet
2332 int ret;
2333 #endif
2334
2335 if (copyin((void *)arg, &instr, sizeof (instr)) != 0)
2336 return (EFAULT);
2337
2338 #ifdef notyet
2339 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2340 proc_t *p;
2341 pid_t pid = instr.ftiq_pid;
2342
2343 mutex_enter(&pidlock);
2344 /*
2345 * Report an error if the process doesn't exist
2346 * or is actively being birthed.
2347 */
2348 if ((p == pfind(pid)) == NULL || p->p_stat == SIDL) {
2349 mutex_exit(&pidlock);
2350 return (ESRCH);
2351 }
2352 mutex_enter(&p->p_lock);
2353 mutex_exit(&pidlock);
2354
2355 if ((ret = priv_proc_cred_perm(cr, p, NULL,
2356 VREAD)) != 0) {
2357 mutex_exit(&p->p_lock);
2358 return (ret);
2359 }
2360
2361 mutex_exit(&p->p_lock);
2362 }
2363 #endif /* notyet */
2364
2365 index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);
2366
2367 mutex_enter(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2368 tp = fasttrap_tpoints.fth_table[index].ftb_data;
2369 while (tp != NULL) {
2370 if (instr.ftiq_pid == tp->ftt_pid &&
2371 instr.ftiq_pc == tp->ftt_pc &&
2372 tp->ftt_proc->ftpc_acount != 0)
2373 break;
2374
2375 tp = tp->ftt_next;
2376 }
2377
2378 if (tp == NULL) {
2379 mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2380 return (ENOENT);
2381 }
2382
2383 bcopy(&tp->ftt_instr, &instr.ftiq_instr,
2384 sizeof (instr.ftiq_instr));
2385 mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2386
2387 if (copyout(&instr, (void *)arg, sizeof (instr)) != 0)
2388 return (EFAULT);
2389
2390 return (0);
2391 }
2392
2393 return (EINVAL);
2394 }
2395
2396 static int
2397 fasttrap_load(void)
2398 {
2399 ulong_t nent;
2400 int i, ret;
2401
2402 /* Create the /dev/dtrace/fasttrap entry. */
2403 fasttrap_cdev = make_dev(&fasttrap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
2404 "dtrace/fasttrap");
2405
2406 mtx_init(&fasttrap_cleanup_mtx, "fasttrap clean", "dtrace", MTX_DEF);
2407 mutex_init(&fasttrap_count_mtx, "fasttrap count mtx", MUTEX_DEFAULT,
2408 NULL);
2409
2410 #ifdef illumos
2411 fasttrap_max = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2412 "fasttrap-max-probes", FASTTRAP_MAX_DEFAULT);
2413 #endif
2414 fasttrap_total = 0;
2415
2416 /*
2417 * Conjure up the tracepoints hashtable...
2418 */
2419 #ifdef illumos
2420 nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2421 "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
2422 #else
2423 nent = tpoints_hash_size;
2424 #endif
2425
2426 if (nent == 0 || nent > 0x1000000)
2427 nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2428
2429 tpoints_hash_size = nent;
2430
2431 if (ISP2(nent))
2432 fasttrap_tpoints.fth_nent = nent;
2433 else
2434 fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
2435 ASSERT(fasttrap_tpoints.fth_nent > 0);
2436 fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
2437 fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
2438 sizeof (fasttrap_bucket_t), KM_SLEEP);
2439 #ifndef illumos
2440 for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
2441 mutex_init(&fasttrap_tpoints.fth_table[i].ftb_mtx,
2442 "tracepoints bucket mtx", MUTEX_DEFAULT, NULL);
2443 #endif
2444
2445 /*
2446 * ... and the providers hash table...
2447 */
2448 nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
2449 if (ISP2(nent))
2450 fasttrap_provs.fth_nent = nent;
2451 else
2452 fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
2453 ASSERT(fasttrap_provs.fth_nent > 0);
2454 fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
2455 fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
2456 sizeof (fasttrap_bucket_t), KM_SLEEP);
2457 #ifndef illumos
2458 for (i = 0; i < fasttrap_provs.fth_nent; i++)
2459 mutex_init(&fasttrap_provs.fth_table[i].ftb_mtx,
2460 "providers bucket mtx", MUTEX_DEFAULT, NULL);
2461 #endif
2462
2463 ret = kproc_create(fasttrap_pid_cleanup_cb, NULL,
2464 &fasttrap_cleanup_proc, 0, 0, "ftcleanup");
2465 if (ret != 0) {
2466 destroy_dev(fasttrap_cdev);
2467 #ifndef illumos
2468 for (i = 0; i < fasttrap_provs.fth_nent; i++)
2469 mutex_destroy(&fasttrap_provs.fth_table[i].ftb_mtx);
2470 for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
2471 mutex_destroy(&fasttrap_tpoints.fth_table[i].ftb_mtx);
2472 #endif
2473 kmem_free(fasttrap_provs.fth_table, fasttrap_provs.fth_nent *
2474 sizeof (fasttrap_bucket_t));
2475 mtx_destroy(&fasttrap_cleanup_mtx);
2476 mutex_destroy(&fasttrap_count_mtx);
2477 return (ret);
2478 }
2479
2480
2481 /*
2482 * ... and the procs hash table.
2483 */
2484 nent = FASTTRAP_PROCS_DEFAULT_SIZE;
2485 if (ISP2(nent))
2486 fasttrap_procs.fth_nent = nent;
2487 else
2488 fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
2489 ASSERT(fasttrap_procs.fth_nent > 0);
2490 fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
2491 fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
2492 sizeof (fasttrap_bucket_t), KM_SLEEP);
2493 #ifndef illumos
2494 for (i = 0; i < fasttrap_procs.fth_nent; i++)
2495 mutex_init(&fasttrap_procs.fth_table[i].ftb_mtx,
2496 "processes bucket mtx", MUTEX_DEFAULT, NULL);
2497
2498 rm_init(&fasttrap_tp_lock, "fasttrap tracepoint");
2499
2500 /*
2501 * This event handler must run before kdtrace_thread_dtor() since it
2502 * accesses the thread's struct kdtrace_thread.
2503 */
2504 fasttrap_thread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor,
2505 fasttrap_thread_dtor, NULL, EVENTHANDLER_PRI_FIRST);
2506 #endif
2507
2508 /*
2509 * Install our hooks into fork(2), exec(2), and exit(2).
2510 */
2511 dtrace_fasttrap_fork = &fasttrap_fork;
2512 dtrace_fasttrap_exit = &fasttrap_exec_exit;
2513 dtrace_fasttrap_exec = &fasttrap_exec_exit;
2514
2515 (void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
2516 &fasttrap_meta_id);
2517
2518 return (0);
2519 }
2520
2521 static int
2522 fasttrap_unload(void)
2523 {
2524 int i, fail = 0;
2525
2526 /*
2527 * Unregister the meta-provider to make sure no new fasttrap-
2528 * managed providers come along while we're trying to close up
2529 * shop. If we fail to detach, we'll need to re-register as a
2530 * meta-provider. We can fail to unregister as a meta-provider
2531 * if providers we manage still exist.
2532 */
2533 if (fasttrap_meta_id != DTRACE_METAPROVNONE &&
2534 dtrace_meta_unregister(fasttrap_meta_id) != 0)
2535 return (-1);
2536
2537 /*
2538 * Iterate over all of our providers. If there's still a process
2539 * that corresponds to that pid, fail to detach.
2540 */
2541 for (i = 0; i < fasttrap_provs.fth_nent; i++) {
2542 fasttrap_provider_t **fpp, *fp;
2543 fasttrap_bucket_t *bucket = &fasttrap_provs.fth_table[i];
2544
2545 mutex_enter(&bucket->ftb_mtx);
2546 fpp = (fasttrap_provider_t **)&bucket->ftb_data;
2547 while ((fp = *fpp) != NULL) {
2548 /*
2549 * Acquire and release the lock as a simple way of
2550 * waiting for any other consumer to finish with
2551 * this provider. A thread must first acquire the
2552 * bucket lock so there's no chance of another thread
2553 * blocking on the provider's lock.
2554 */
2555 mutex_enter(&fp->ftp_mtx);
2556 mutex_exit(&fp->ftp_mtx);
2557
2558 if (dtrace_unregister(fp->ftp_provid) != 0) {
2559 fail = 1;
2560 fpp = &fp->ftp_next;
2561 } else {
2562 *fpp = fp->ftp_next;
2563 fasttrap_provider_free(fp);
2564 }
2565 }
2566
2567 mutex_exit(&bucket->ftb_mtx);
2568 }
2569
2570 if (fail) {
2571 (void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
2572 &fasttrap_meta_id);
2573
2574 return (-1);
2575 }
2576
2577 /*
2578 * Stop new processes from entering these hooks now, before the
2579 * fasttrap_cleanup thread runs. That way all processes will hopefully
2580 * be out of these hooks before we free fasttrap_provs.fth_table
2581 */
2582 ASSERT(dtrace_fasttrap_fork == &fasttrap_fork);
2583 dtrace_fasttrap_fork = NULL;
2584
2585 ASSERT(dtrace_fasttrap_exec == &fasttrap_exec_exit);
2586 dtrace_fasttrap_exec = NULL;
2587
2588 ASSERT(dtrace_fasttrap_exit == &fasttrap_exec_exit);
2589 dtrace_fasttrap_exit = NULL;
2590
2591 mtx_lock(&fasttrap_cleanup_mtx);
2592 fasttrap_cleanup_drain = 1;
2593 /* Wait for the cleanup thread to finish up and signal us. */
2594 wakeup(&fasttrap_cleanup_cv);
2595 mtx_sleep(&fasttrap_cleanup_drain, &fasttrap_cleanup_mtx, 0, "ftcld",
2596 0);
2597 fasttrap_cleanup_proc = NULL;
2598 mtx_destroy(&fasttrap_cleanup_mtx);
2599
2600 #ifdef DEBUG
2601 mutex_enter(&fasttrap_count_mtx);
2602 ASSERT(fasttrap_pid_count == 0);
2603 mutex_exit(&fasttrap_count_mtx);
2604 #endif
2605
2606 #ifndef illumos
2607 EVENTHANDLER_DEREGISTER(thread_dtor, fasttrap_thread_dtor_tag);
2608
2609 for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
2610 mutex_destroy(&fasttrap_tpoints.fth_table[i].ftb_mtx);
2611 for (i = 0; i < fasttrap_provs.fth_nent; i++)
2612 mutex_destroy(&fasttrap_provs.fth_table[i].ftb_mtx);
2613 for (i = 0; i < fasttrap_procs.fth_nent; i++)
2614 mutex_destroy(&fasttrap_procs.fth_table[i].ftb_mtx);
2615 #endif
2616 kmem_free(fasttrap_tpoints.fth_table,
2617 fasttrap_tpoints.fth_nent * sizeof (fasttrap_bucket_t));
2618 fasttrap_tpoints.fth_nent = 0;
2619
2620 kmem_free(fasttrap_provs.fth_table,
2621 fasttrap_provs.fth_nent * sizeof (fasttrap_bucket_t));
2622 fasttrap_provs.fth_nent = 0;
2623
2624 kmem_free(fasttrap_procs.fth_table,
2625 fasttrap_procs.fth_nent * sizeof (fasttrap_bucket_t));
2626 fasttrap_procs.fth_nent = 0;
2627
2628 #ifndef illumos
2629 destroy_dev(fasttrap_cdev);
2630 mutex_destroy(&fasttrap_count_mtx);
2631 rm_destroy(&fasttrap_tp_lock);
2632 #endif
2633
2634 return (0);
2635 }
2636
2637 /* ARGSUSED */
2638 static int
2639 fasttrap_modevent(module_t mod __unused, int type, void *data __unused)
2640 {
2641 int error = 0;
2642
2643 switch (type) {
2644 case MOD_LOAD:
2645 break;
2646
2647 case MOD_UNLOAD:
2648 break;
2649
2650 case MOD_SHUTDOWN:
2651 break;
2652
2653 default:
2654 error = EOPNOTSUPP;
2655 break;
2656 }
2657 return (error);
2658 }
2659
2660 SYSINIT(fasttrap_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, fasttrap_load,
2661 NULL);
2662 SYSUNINIT(fasttrap_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY,
2663 fasttrap_unload, NULL);
2664
2665 DEV_MODULE(fasttrap, fasttrap_modevent, NULL);
2666 MODULE_VERSION(fasttrap, 1);
2667 MODULE_DEPEND(fasttrap, dtrace, 1, 1, 1);
2668 MODULE_DEPEND(fasttrap, opensolaris, 1, 1, 1);
Cache object: 40f1f4b2fa9ff3def0806ed82372f1ab
|