FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_lock.c
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and functions used to maintain
 * lock_object structures.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.2/sys/kern/subr_lock.c 215588 2010-11-20 20:34:13Z brucec $");

#include "opt_ddb.h"
#include "opt_mprof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker_set.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/cpufunc.h>

CTASSERT(LOCK_CLASS_MAX == 15);

struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
	&lock_class_mtx_spin,
	&lock_class_mtx_sleep,
	&lock_class_sx,
	&lock_class_rm,
	&lock_class_rw,
	&lock_class_lockmgr,
};

void
lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
    const char *type, int flags)
{
	int i;

	/* Check for double-init and zero object. */
	KASSERT(!lock_initalized(lock), ("lock \"%s\" %p already initialized",
	    name, lock));

	/* Look up lock class to find its index. */
	for (i = 0; i < LOCK_CLASS_MAX; i++)
		if (lock_classes[i] == class) {
			lock->lo_flags = i << LO_CLASSSHIFT;
			break;
		}
	KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));

	/* Initialize the lock object. */
	lock->lo_name = name;
	lock->lo_flags |= flags | LO_INITIALIZED;
	LOCK_LOG_INIT(lock, 0);
	WITNESS_INIT(lock, (type != NULL) ? type : name);
}

void
lock_destroy(struct lock_object *lock)
{

	KASSERT(lock_initalized(lock), ("lock %p is not initialized", lock));
	WITNESS_DESTROY(lock);
	LOCK_LOG_DESTROY(lock, 0);
	lock->lo_flags &= ~LO_INITIALIZED;
}

#ifdef DDB
DB_SHOW_COMMAND(lock, db_show_lock)
{
	struct lock_object *lock;
	struct lock_class *class;

	if (!have_addr)
		return;
	lock = (struct lock_object *)addr;
	if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) {
		db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock));
		return;
	}
	class = LOCK_CLASS(lock);
	db_printf(" class: %s\n", class->lc_name);
	db_printf(" name: %s\n", lock->lo_name);
	class->lc_ddb_show(lock);
}
#endif

#ifdef LOCK_PROFILING

/*
 * One object per-thread for each lock the thread owns.  Tracks individual
 * lock instances.
 */
struct lock_profile_object {
	LIST_ENTRY(lock_profile_object) lpo_link;
	struct lock_object *lpo_obj;
	const char	*lpo_file;
	int		lpo_line;
	uint16_t	lpo_ref;
	uint16_t	lpo_cnt;
	u_int64_t	lpo_acqtime;
	u_int64_t	lpo_waittime;
	u_int		lpo_contest_locking;
};

/*
 * One lock_prof for each (file, line, lock object) triple.
 */
struct lock_prof {
	SLIST_ENTRY(lock_prof) link;
	struct lock_class *class;
	const char	*file;
	const char	*name;
	int		line;
	int		ticks;
	uintmax_t	cnt_wait_max;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_wait;
	uintmax_t	cnt_cur;
	uintmax_t	cnt_contest_locking;
};
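
/*
 * Lifetime sketch (informal summary of the code below): on a profiled
 * acquire, lock_profile_obtain_lock_success() links a lock_profile_object
 * onto the owning thread's td_lprof[spin] list and timestamps it.  On the
 * final release, lock_profile_release_lock() folds the hold and wait times
 * into the matching (file, line, name) lock_prof bucket and returns the
 * object to its per-cpu free list.
 */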

SLIST_HEAD(lphead, lock_prof);

#define	LPROF_HASH_SIZE		4096
#define	LPROF_HASH_MASK		(LPROF_HASH_SIZE - 1)
#define	LPROF_CACHE_SIZE	4096

/*
 * Array of objects and profs for each type of object for each cpu.  Spinlocks
 * are handled separately because a thread may be preempted and acquire a
 * spinlock while in the lock profiling code of a non-spinlock.  In this way
 * we only need a critical section to protect the per-cpu lists.
 */
struct lock_prof_type {
	struct lphead		lpt_lpalloc;
	struct lpohead		lpt_lpoalloc;
	struct lphead		lpt_hash[LPROF_HASH_SIZE];
	struct lock_prof	lpt_prof[LPROF_CACHE_SIZE];
	struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE];
};

struct lock_prof_cpu {
	struct lock_prof_type	lpc_types[2]; /* One for spin one for other. */
};
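
/*
 * Example of how the two lpc_types slots are selected (taken from
 * lock_profile_obtain_lock_success() below): spin locks use index 1,
 * everything else index 0:
 *
 *	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
 *	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
 */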

struct lock_prof_cpu *lp_cpu[MAXCPU];

volatile int lock_prof_enable = 0;
static volatile int lock_prof_resetting;

/* SWAG: sbuf size = average stats line size * expected number of locks. */
#define	LPROF_SBUF_SIZE		(256 * 400)

static int lock_prof_rejected;
static int lock_prof_skipspin;
static int lock_prof_skipcount;

#ifndef USE_CPU_NANOSECONDS
u_int64_t
nanoseconds(void)
{
	struct bintime bt;
	u_int64_t ns;

	binuptime(&bt);
	/* From bintime2timespec */
	ns = bt.sec * (u_int64_t)1000000000;
	ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
	return (ns);
}
#endif
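
/*
 * Worked example of the conversion above: bt.frac is a 64-bit binary
 * fraction of a second, so its top 32 bits represent frac / 2^32 seconds.
 * Multiplying by 10^9 and shifting right 32 more bits yields nanoseconds.
 * For bt.frac = 1 << 63 (exactly half a second), the expression computes
 * (10^9 * 2^31) >> 32 = 500000000 ns, as expected.
 */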

/*
 * Seed a type's free lists with its statically allocated prof and object
 * slots.
 */
static void
lock_prof_init_type(struct lock_prof_type *type)
{
	int i;

	SLIST_INIT(&type->lpt_lpalloc);
	LIST_INIT(&type->lpt_lpoalloc);
	for (i = 0; i < LPROF_CACHE_SIZE; i++) {
		SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i],
		    link);
		LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i],
		    lpo_link);
	}
}

/* Allocate and initialize the profiling state for each CPU ID up to mp_maxid. */
static void
lock_prof_init(void *arg)
{
	int cpu;

	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lp_cpu[cpu] = malloc(sizeof(*lp_cpu[cpu]), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[0]);
		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[1]);
	}
}
SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);
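/*
 * Note: this runs at SI_SUB_SMP, after the other CPUs have been started,
 * so mp_maxid is final by the time the per-cpu structures are allocated.
 */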

/*
 * To be certain that lock profiling has idled on all cpus before we
 * reset, we schedule the resetting thread on all active cpus.  Since
 * all operations happen within critical sections we can be sure that
 * it is safe to zero the profiling structures: merely being scheduled
 * on a cpu proves that no profiling code was mid-flight there.
 */
static void
lock_prof_idle(void)
{
	struct thread *td;
	int cpu;

	td = curthread;
	thread_lock(td);
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (CPU_ABSENT(cpu))
			continue;
		sched_bind(td, cpu);
	}
	sched_unbind(td);
	thread_unlock(td);
}

static void
lock_prof_reset_wait(void)
{

	/*
	 * Spin relinquishing our cpu so that lock_prof_idle may
	 * run on it.
	 */
	while (lock_prof_resetting)
		sched_relinquish(curthread);
}

static void
lock_prof_reset(void)
{
	struct lock_prof_cpu *lpc;
	int enabled, i, cpu;

	/*
	 * We race not only with lock acquire and release but also with
	 * thread exit.  To be certain that exiting threads drop their
	 * head pointers safely, they must see resetting set before
	 * enabled is cleared.  Otherwise a thread could observe profiling
	 * disabled, leave a lock linked on its per-thread list, and yet
	 * never wait for the reset below to remove it.
	 */
	atomic_store_rel_int(&lock_prof_resetting, 1);
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	lock_prof_idle();
	/*
	 * Some objects may have migrated between CPUs.  Clear all links
	 * before we zero the structures.  Some items may still be linked
	 * into per-thread lists as well.
	 */
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lpc = lp_cpu[cpu];
		for (i = 0; i < LPROF_CACHE_SIZE; i++) {
			LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link);
			LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link);
		}
	}
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lpc = lp_cpu[cpu];
		bzero(lpc, sizeof(*lpc));
		lock_prof_init_type(&lpc->lpc_types[0]);
		lock_prof_init_type(&lpc->lpc_types[1]);
	}
	atomic_store_rel_int(&lock_prof_resetting, 0);
	lock_prof_enable = enabled;
}

static void
lock_prof_output(struct lock_prof *lp, struct sbuf *sb)
{
	const char *p;

	/* Trim any leading "../" components from the file name. */
	for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3);
	sbuf_printf(sb,
	    "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n",
	    lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000,
	    lp->cnt_wait / 1000, lp->cnt_cur,
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_tot / (lp->cnt_cur * 1000),
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_wait / (lp->cnt_cur * 1000),
	    (uintmax_t)0, lp->cnt_contest_locking,
	    p, lp->line, lp->class->lc_name, lp->name);
}
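
/*
 * The columns above correspond to the header printed by
 * dump_lock_prof_stats(): max, wait_max, total, wait_total, count, avg,
 * wait_avg, cnt_hold, cnt_lock and name.  Times are accumulated in
 * nanoseconds and divided by 1000 here, so the reported values are in
 * microseconds; cnt_hold is always printed as 0 in this version.
 */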

/*
 * Accumulate the statistics for *match from every CPU's hash table into
 * *dst, stamping each visited entry with the current ticks value t so
 * that it is only reported once per dump.
 */
static void
lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash,
    int spin, int t)
{
	struct lock_prof_type *type;
	struct lock_prof *l;
	int cpu;

	dst->file = match->file;
	dst->line = match->line;
	dst->class = match->class;
	dst->name = match->name;

	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (lp_cpu[cpu] == NULL)
			continue;
		type = &lp_cpu[cpu]->lpc_types[spin];
		SLIST_FOREACH(l, &type->lpt_hash[hash], link) {
			if (l->ticks == t)
				continue;
			if (l->file != match->file || l->line != match->line ||
			    l->name != match->name)
				continue;
			l->ticks = t;
			if (l->cnt_max > dst->cnt_max)
				dst->cnt_max = l->cnt_max;
			if (l->cnt_wait_max > dst->cnt_wait_max)
				dst->cnt_wait_max = l->cnt_wait_max;
			dst->cnt_tot += l->cnt_tot;
			dst->cnt_wait += l->cnt_wait;
			dst->cnt_cur += l->cnt_cur;
			dst->cnt_contest_locking += l->cnt_contest_locking;
		}
	}
}

/*
 * Walk one type's hash table, emitting a summed line for each distinct
 * (file, line, name) entry that has not yet been stamped with t.
 */
static void
lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin,
    int t)
{
	struct lock_prof *l;
	int i;

	for (i = 0; i < LPROF_HASH_SIZE; ++i) {
		SLIST_FOREACH(l, &type->lpt_hash[i], link) {
			struct lock_prof lp = {};

			if (l->ticks == t)
				continue;
			lock_prof_sum(l, &lp, i, spin, t);
			lock_prof_output(&lp, sb);
			if (sbuf_overflowed(sb))
				return;
		}
	}
}

/*
 * sysctl handler that quiesces profiling, snapshots every per-cpu hash
 * table and formats the result.  The sbuf is fixed-length, so on overflow
 * the size estimate is grown and the whole dump is retried.
 */
static int
dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	static int multiplier = 1;
	struct sbuf *sb;
	int error, cpu, t;
	int enabled;

retry_sbufops:
	sb = sbuf_new(NULL, NULL, LPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
	sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n",
	    "max", "wait_max", "total", "wait_total", "count", "avg",
	    "wait_avg", "cnt_hold", "cnt_lock", "name");
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	lock_prof_idle();
	t = ticks;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (lp_cpu[cpu] == NULL)
			continue;
		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t);
		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t);
		if (sbuf_overflowed(sb)) {
			sbuf_delete(sb);
			multiplier++;
			goto retry_sbufops;
		}
	}
	lock_prof_enable = enabled;

	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}

/*
 * sysctl handler for debug.lock.prof.enable; enabling clears any stale
 * statistics first.
 */
static int
enable_lock_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = lock_prof_enable;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == lock_prof_enable)
		return (0);
	if (v == 1)
		lock_prof_reset();
	lock_prof_enable = !!v;

	return (0);
}

static int
reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	lock_prof_reset();

	return (0);
}

/*
 * Find or allocate the per-cpu lock_prof bucket for a (name, file, line)
 * triple.  Runs under a critical section since it uses per-cpu state.
 */
static struct lock_prof *
lock_profile_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	const char *unknown = "(unknown)";
	struct lock_prof_type *type;
	struct lock_prof *lp;
	struct lphead *head;
	const char *p;
	u_int hash;

	p = file;
	if (p == NULL || *p == '\0')
		p = unknown;
	hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line;
	hash &= LPROF_HASH_MASK;
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	head = &type->lpt_hash[hash];
	SLIST_FOREACH(lp, head, link) {
		if (lp->line == line && lp->file == p &&
		    lp->name == lo->lo_name)
			return (lp);
	}
	lp = SLIST_FIRST(&type->lpt_lpalloc);
	if (lp == NULL) {
		lock_prof_rejected++;
		return (lp);
	}
	SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link);
	lp->file = p;
	lp->line = line;
	lp->class = LOCK_CLASS(lo);
	lp->name = lo->lo_name;
	SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link);
	return (lp);
}
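
/*
 * The hash above relies on lo_name and the file string being stable
 * kernel pointers, so identity comparison of the pointers (rather than
 * strcmp()) suffices in the lookup loop; colliding triples simply chain
 * off the same lpt_hash bucket.
 */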

/*
 * Find the lock_profile_object this thread is already using for lo at
 * (file, line), or allocate one from the per-cpu free list and link it
 * onto the thread's list.
 */
static struct lock_profile_object *
lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lpohead *head;

	head = &curthread->td_lprof[spin];
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo && l->lpo_file == file &&
		    l->lpo_line == line)
			return (l);
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	l = LIST_FIRST(&type->lpt_lpoalloc);
	if (l == NULL) {
		lock_prof_rejected++;
		return (NULL);
	}
	LIST_REMOVE(l, lpo_link);
	l->lpo_obj = lo;
	l->lpo_file = file;
	l->lpo_line = line;
	l->lpo_cnt = 0;
	LIST_INSERT_HEAD(head, l, lpo_link);

	return (l);
}
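
/*
 * Reusing an existing object here is what makes recursive acquisition
 * cheap: lpo_ref counts the recursion depth, lpo_cnt counts total
 * acquisitions, and only the outermost acquire/release pair is timed
 * (see the lpo_ref checks below).
 */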

void
lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
    uint64_t waittime, const char *file, int line)
{
	static int lock_prof_count;
	struct lock_profile_object *l;
	int spin;

	/* don't reset the timer when/if recursing */
	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
		return;
	if (lock_prof_skipcount &&
	    (++lock_prof_count % lock_prof_skipcount) != 0)
		return;
	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
	if (spin && lock_prof_skipspin == 1)
		return;
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0)
		goto out;
	l = lock_profile_object_lookup(lo, spin, file, line);
	if (l == NULL)
		goto out;
	l->lpo_cnt++;
	/* Count every acquisition but only time the outermost one. */
	if (++l->lpo_ref > 1)
		goto out;
	l->lpo_contest_locking = contested;
	l->lpo_acqtime = nanoseconds();
	if (waittime && (l->lpo_acqtime > waittime))
		l->lpo_waittime = l->lpo_acqtime - waittime;
	else
		l->lpo_waittime = 0;
out:
	critical_exit();
}

void
lock_profile_thread_exit(struct thread *td)
{
#ifdef INVARIANTS
	struct lock_profile_object *l;

	MPASS(curthread->td_critnest == 0);
#endif
	/*
	 * If lock profiling was disabled we have to wait for reset to
	 * clear our pointers before we can exit safely.
	 */
	lock_prof_reset_wait();
#ifdef INVARIANTS
	LIST_FOREACH(l, &td->td_lprof[0], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
	LIST_FOREACH(l, &td->td_lprof[1], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
#endif
	MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL);
	MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL);
}

void
lock_profile_release_lock(struct lock_object *lo)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lock_prof *lp;
	u_int64_t curtime, holdtime;
	struct lpohead *head;
	int spin;

	if (lo->lo_flags & LO_NOPROFILE)
		return;
	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
	head = &curthread->td_lprof[spin];
	if (LIST_FIRST(head) == NULL)
		return;
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0 && lock_prof_resetting == 1)
		goto out;
	/*
	 * If lock profiling is not enabled we still want to remove the
	 * lpo from our queue.
	 */
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo)
			break;
	if (l == NULL)
		goto out;
	/* Still held recursively; only record on the outermost release. */
	if (--l->lpo_ref > 0)
		goto out;
	lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line);
	if (lp == NULL)
		goto release;
	curtime = nanoseconds();
	if (curtime < l->lpo_acqtime)
		goto release;
	holdtime = curtime - l->lpo_acqtime;

	/*
	 * Record if the lock has been held longer now than ever
	 * before.
	 */
	if (holdtime > lp->cnt_max)
		lp->cnt_max = holdtime;
	if (l->lpo_waittime > lp->cnt_wait_max)
		lp->cnt_wait_max = l->lpo_waittime;
	lp->cnt_tot += holdtime;
	lp->cnt_wait += l->lpo_waittime;
	lp->cnt_contest_locking += l->lpo_contest_locking;
	lp->cnt_cur += l->lpo_cnt;
release:
	LIST_REMOVE(l, lpo_link);
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link);
out:
	critical_exit();
}

SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging");
SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL, "lock profiling");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW,
    &lock_prof_skipspin, 0, "Skip profiling on spinlocks.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW,
    &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &lock_prof_rejected, 0, "Number of rejected profiling records");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_lock_prof, "I", "Enable lock profiling");

#endif
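
/*
 * Example usage from userland (illustrative; the kernel must be built
 * with options LOCK_PROFILING):
 *
 *	sysctl debug.lock.prof.enable=1
 *	<run the workload of interest>
 *	sysctl debug.lock.prof.enable=0
 *	sysctl debug.lock.prof.stats
 *	sysctl debug.lock.prof.reset=1
 */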