FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_mutex.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Berkeley Software Design Inc's name may not be used to endorse or
15 * promote products derived from this software without specific prior
16 * written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
31 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
32 */
33
34 /*
35 * Machine independent bits of mutex implementation.
36 */
37
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD: releng/12.0/sys/kern/kern_mutex.c 340269 2018-11-08 22:39:38Z jhb $");
40
41 #include "opt_adaptive_mutexes.h"
42 #include "opt_ddb.h"
43 #include "opt_hwpmc_hooks.h"
44 #include "opt_sched.h"
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/bus.h>
49 #include <sys/conf.h>
50 #include <sys/kdb.h>
51 #include <sys/kernel.h>
52 #include <sys/ktr.h>
53 #include <sys/lock.h>
54 #include <sys/malloc.h>
55 #include <sys/mutex.h>
56 #include <sys/proc.h>
57 #include <sys/resourcevar.h>
58 #include <sys/sched.h>
59 #include <sys/sbuf.h>
60 #include <sys/smp.h>
61 #include <sys/sysctl.h>
62 #include <sys/turnstile.h>
63 #include <sys/vmmeter.h>
64 #include <sys/lock_profile.h>
65
66 #include <machine/atomic.h>
67 #include <machine/bus.h>
68 #include <machine/cpu.h>
69
70 #include <ddb/ddb.h>
71
72 #include <fs/devfs/devfs_int.h>
73
74 #include <vm/vm.h>
75 #include <vm/vm_extern.h>
76
77 #if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
78 #define ADAPTIVE_MUTEXES
79 #endif
80
81 #ifdef HWPMC_HOOKS
82 #include <sys/pmckern.h>
83 PMC_SOFT_DEFINE( , , lock, failed);
84 #endif
85
86 /*
87 * Return the mutex address when the lock cookie address is provided.
88 * This functionality assumes that struct mtx has a member named mtx_lock.
89 */
90 #define mtxlock2mtx(c) (__containerof(c, struct mtx, mtx_lock))
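/*
 * The mtx_lock() family of macros pass &m->mtx_lock (the lock cookie) to the
 * functions below; mtxlock2mtx() recovers the enclosing struct mtx from that
 * address.
 */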
91
92 /*
93 * Internal utility macros.
94 */
95 #define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
96
97 #define mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)
98
99 static void assert_mtx(const struct lock_object *lock, int what);
100 #ifdef DDB
101 static void db_show_mtx(const struct lock_object *lock);
102 #endif
103 static void lock_mtx(struct lock_object *lock, uintptr_t how);
104 static void lock_spin(struct lock_object *lock, uintptr_t how);
105 #ifdef KDTRACE_HOOKS
106 static int owner_mtx(const struct lock_object *lock,
107 struct thread **owner);
108 #endif
109 static uintptr_t unlock_mtx(struct lock_object *lock);
110 static uintptr_t unlock_spin(struct lock_object *lock);
111
112 /*
113 * Lock classes for sleep and spin mutexes.
114 */
115 struct lock_class lock_class_mtx_sleep = {
116 .lc_name = "sleep mutex",
117 .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
118 .lc_assert = assert_mtx,
119 #ifdef DDB
120 .lc_ddb_show = db_show_mtx,
121 #endif
122 .lc_lock = lock_mtx,
123 .lc_unlock = unlock_mtx,
124 #ifdef KDTRACE_HOOKS
125 .lc_owner = owner_mtx,
126 #endif
127 };
128 struct lock_class lock_class_mtx_spin = {
129 .lc_name = "spin mutex",
130 .lc_flags = LC_SPINLOCK | LC_RECURSABLE,
131 .lc_assert = assert_mtx,
132 #ifdef DDB
133 .lc_ddb_show = db_show_mtx,
134 #endif
135 .lc_lock = lock_spin,
136 .lc_unlock = unlock_spin,
137 #ifdef KDTRACE_HOOKS
138 .lc_owner = owner_mtx,
139 #endif
140 };
141
142 #ifdef ADAPTIVE_MUTEXES
143 static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD, NULL, "mtx debugging");
144
145 static struct lock_delay_config __read_frequently mtx_delay;
146
147 SYSCTL_INT(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base,
148 0, "");
149 SYSCTL_INT(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
150 0, "");
151
152 LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay);
153 #endif
154
155 static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin, CTLFLAG_RD, NULL,
156 "mtx spin debugging");
157
158 static struct lock_delay_config __read_frequently mtx_spin_delay;
159
160 SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW,
161 &mtx_spin_delay.base, 0, "");
162 SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW,
163 &mtx_spin_delay.max, 0, "");
164
165 LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay);
166
167 /*
168 * System-wide mutexes
169 */
170 struct mtx blocked_lock;
171 struct mtx __exclusive_cache_line Giant;
172
173 static void _mtx_lock_indefinite_check(struct mtx *, struct lock_delay_arg *);
174
175 void
176 assert_mtx(const struct lock_object *lock, int what)
177 {
178
179 mtx_assert((const struct mtx *)lock, what);
180 }
181
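/*
 * lock_class methods: lc_lock/lc_unlock are invoked when a lock of this class
 * is dropped and reacquired around a sleep (e.g. by mtx_sleep()); spin
 * mutexes cannot sleep that way and must use msleep_spin(), hence the panics
 * below.
 */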
182 void
183 lock_mtx(struct lock_object *lock, uintptr_t how)
184 {
185
186 mtx_lock((struct mtx *)lock);
187 }
188
189 void
190 lock_spin(struct lock_object *lock, uintptr_t how)
191 {
192
193 panic("spin locks can only use msleep_spin");
194 }
195
196 uintptr_t
197 unlock_mtx(struct lock_object *lock)
198 {
199 struct mtx *m;
200
201 m = (struct mtx *)lock;
202 mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
203 mtx_unlock(m);
204 return (0);
205 }
206
207 uintptr_t
208 unlock_spin(struct lock_object *lock)
209 {
210
211 panic("spin locks can only use msleep_spin");
212 }
213
214 #ifdef KDTRACE_HOOKS
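/*
 * Report the owning thread of a mutex for lockstat; returns non-zero when the
 * mutex is currently owned.
 */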
215 int
216 owner_mtx(const struct lock_object *lock, struct thread **owner)
217 {
218 const struct mtx *m;
219 uintptr_t x;
220
221 m = (const struct mtx *)lock;
222 x = m->mtx_lock;
223 *owner = (struct thread *)(x & ~MTX_FLAGMASK);
224 return (*owner != NULL);
225 }
226 #endif
227
228 /*
229 * Function versions of the inlined __mtx_* macros. These are used by
230 * modules and can also be called from assembly language if needed.
231 */
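/*
 * Illustrative sketch (not part of this file): a consumer typically declares
 * and uses a sleep mutex along these lines, with hypothetical names:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo mutex", MTX_DEF);
 *
 *	mtx_lock(&foo_mtx);
 *	... access state protected by foo_mtx ...
 *	mtx_unlock(&foo_mtx);
 */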
232 void
233 __mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
234 {
235 struct mtx *m;
236 uintptr_t tid, v;
237
238 m = mtxlock2mtx(c);
239
240 KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
241 !TD_IS_IDLETHREAD(curthread),
242 ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
243 curthread, m->lock_object.lo_name, file, line));
244 KASSERT(m->mtx_lock != MTX_DESTROYED,
245 ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
246 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
247 ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
248 file, line));
249 WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
250 LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
251
252 tid = (uintptr_t)curthread;
253 v = MTX_UNOWNED;
254 if (!_mtx_obtain_lock_fetch(m, &v, tid))
255 _mtx_lock_sleep(m, v, opts, file, line);
256 else
257 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
258 m, 0, 0, file, line);
259 LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
260 line);
261 WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
262 file, line);
263 TD_LOCKS_INC(curthread);
264 }
265
266 void
267 __mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
268 {
269 struct mtx *m;
270
271 m = mtxlock2mtx(c);
272
273 KASSERT(m->mtx_lock != MTX_DESTROYED,
274 ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
275 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
276 ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
277 file, line));
278 WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
279 LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
280 line);
281 mtx_assert(m, MA_OWNED);
282
283 #ifdef LOCK_PROFILING
284 __mtx_unlock_sleep(c, (uintptr_t)curthread, opts, file, line);
285 #else
286 __mtx_unlock(m, curthread, opts, file, line);
287 #endif
288 TD_LOCKS_DEC(curthread);
289 }
290
291 void
292 __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
293 int line)
294 {
295 struct mtx *m;
296 #ifdef SMP
297 uintptr_t tid, v;
298 #endif
299
300 m = mtxlock2mtx(c);
301
302 KASSERT(m->mtx_lock != MTX_DESTROYED,
303 ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
304 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
305 ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
306 m->lock_object.lo_name, file, line));
307 if (mtx_owned(m))
308 KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
309 (opts & MTX_RECURSE) != 0,
310 ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
311 m->lock_object.lo_name, file, line));
312 opts &= ~MTX_RECURSE;
313 WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
314 file, line, NULL);
315 #ifdef SMP
316 spinlock_enter();
317 tid = (uintptr_t)curthread;
318 v = MTX_UNOWNED;
319 if (!_mtx_obtain_lock_fetch(m, &v, tid))
320 _mtx_lock_spin(m, v, opts, file, line);
321 else
322 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,
323 m, 0, 0, file, line);
324 #else
325 __mtx_lock_spin(m, curthread, opts, file, line);
326 #endif
327 LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
328 line);
329 WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
330 }
331
332 int
333 __mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
334 int line)
335 {
336 struct mtx *m;
337
338 if (SCHEDULER_STOPPED())
339 return (1);
340
341 m = mtxlock2mtx(c);
342
343 KASSERT(m->mtx_lock != MTX_DESTROYED,
344 ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
345 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
346 ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
347 m->lock_object.lo_name, file, line));
348 KASSERT((opts & MTX_RECURSE) == 0,
349 ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
350 m->lock_object.lo_name, file, line));
351 if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
352 LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
353 WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
354 return (1);
355 }
356 LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
357 return (0);
358 }
359
360 void
361 __mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
362 int line)
363 {
364 struct mtx *m;
365
366 m = mtxlock2mtx(c);
367
368 KASSERT(m->mtx_lock != MTX_DESTROYED,
369 ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
370 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
371 ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
372 m->lock_object.lo_name, file, line));
373 WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
374 LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
375 line);
376 mtx_assert(m, MA_OWNED);
377
378 __mtx_unlock_spin(m);
379 }
380
381 /*
382 * The important part of mtx_trylock{,_flags}().
383 * Tries to acquire lock `m.' If this function is called on a mutex that
384 * is already owned, it will recursively acquire the lock.
385 */
386 int
387 _mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
388 {
389 struct thread *td;
390 uintptr_t tid, v;
391 #ifdef LOCK_PROFILING
392 uint64_t waittime = 0;
393 int contested = 0;
394 #endif
395 int rval;
396 bool recursed;
397
398 td = curthread;
399 tid = (uintptr_t)td;
400 if (SCHEDULER_STOPPED_TD(td))
401 return (1);
402
403 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
404 ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
405 curthread, m->lock_object.lo_name, file, line));
406 KASSERT(m->mtx_lock != MTX_DESTROYED,
407 ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
408 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
409 ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
410 file, line));
411
412 rval = 1;
413 recursed = false;
414 v = MTX_UNOWNED;
415 for (;;) {
416 if (_mtx_obtain_lock_fetch(m, &v, tid))
417 break;
418 if (v == MTX_UNOWNED)
419 continue;
420 if (v == tid &&
421 ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
422 (opts & MTX_RECURSE) != 0)) {
423 m->mtx_recurse++;
424 atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
425 recursed = true;
426 break;
427 }
428 rval = 0;
429 break;
430 }
431
432 opts &= ~MTX_RECURSE;
433
434 LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
435 if (rval) {
436 WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
437 file, line);
438 TD_LOCKS_INC(curthread);
439 if (!recursed)
440 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
441 m, contested, waittime, file, line);
442 }
443
444 return (rval);
445 }
446
447 int
448 _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
449 {
450 struct mtx *m;
451
452 m = mtxlock2mtx(c);
453 return (_mtx_trylock_flags_int(m, opts LOCK_FILE_LINE_ARG));
454 }
455
456 /*
457 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
458 *
459 * We call this if the lock is either contested (i.e. we need to go to
460 * sleep waiting for it), or if we need to recurse on it.
461 */
462 #if LOCK_DEBUG > 0
463 void
464 __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts, const char *file,
465 int line)
466 #else
467 void
468 __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
469 #endif
470 {
471 struct thread *td;
472 struct mtx *m;
473 struct turnstile *ts;
474 uintptr_t tid;
475 struct thread *owner;
476 #ifdef LOCK_PROFILING
477 int contested = 0;
478 uint64_t waittime = 0;
479 #endif
480 #if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
481 struct lock_delay_arg lda;
482 #endif
483 #ifdef KDTRACE_HOOKS
484 u_int sleep_cnt = 0;
485 int64_t sleep_time = 0;
486 int64_t all_time = 0;
487 #endif
488 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
489 int doing_lockprof;
490 #endif
491
492 td = curthread;
493 tid = (uintptr_t)td;
494 m = mtxlock2mtx(c);
495
496 #ifdef KDTRACE_HOOKS
497 if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
498 while (v == MTX_UNOWNED) {
499 if (_mtx_obtain_lock_fetch(m, &v, tid))
500 goto out_lockstat;
501 }
502 doing_lockprof = 1;
503 all_time -= lockstat_nsecs(&m->lock_object);
504 }
505 #endif
506 #ifdef LOCK_PROFILING
507 doing_lockprof = 1;
508 #endif
509
510 if (SCHEDULER_STOPPED_TD(td))
511 return;
512
513 #if defined(ADAPTIVE_MUTEXES)
514 lock_delay_arg_init(&lda, &mtx_delay);
515 #elif defined(KDTRACE_HOOKS)
516 lock_delay_arg_init(&lda, NULL);
517 #endif
518
519 if (__predict_false(v == MTX_UNOWNED))
520 v = MTX_READ_VALUE(m);
521
522 if (__predict_false(lv_mtx_owner(v) == td)) {
523 KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
524 (opts & MTX_RECURSE) != 0,
525 ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
526 m->lock_object.lo_name, file, line));
527 #if LOCK_DEBUG > 0
528 opts &= ~MTX_RECURSE;
529 #endif
530 m->mtx_recurse++;
531 atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
532 if (LOCK_LOG_TEST(&m->lock_object, opts))
533 CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
534 return;
535 }
536 #if LOCK_DEBUG > 0
537 opts &= ~MTX_RECURSE;
538 #endif
539
540 #ifdef HWPMC_HOOKS
541 PMC_SOFT_CALL( , , lock, failed);
542 #endif
543 lock_profile_obtain_lock_failed(&m->lock_object,
544 &contested, &waittime);
545 if (LOCK_LOG_TEST(&m->lock_object, opts))
546 CTR4(KTR_LOCK,
547 "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
548 m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
549
550 for (;;) {
551 if (v == MTX_UNOWNED) {
552 if (_mtx_obtain_lock_fetch(m, &v, tid))
553 break;
554 continue;
555 }
556 #ifdef KDTRACE_HOOKS
557 lda.spin_cnt++;
558 #endif
559 #ifdef ADAPTIVE_MUTEXES
560 /*
561 * If the owner is running on another CPU, spin until the
562 * owner stops running or the state of the lock changes.
563 */
564 owner = lv_mtx_owner(v);
565 if (TD_IS_RUNNING(owner)) {
566 if (LOCK_LOG_TEST(&m->lock_object, 0))
567 CTR3(KTR_LOCK,
568 "%s: spinning on %p held by %p",
569 __func__, m, owner);
570 KTR_STATE1(KTR_SCHED, "thread",
571 sched_tdname((struct thread *)tid),
572 "spinning", "lockname:\"%s\"",
573 m->lock_object.lo_name);
574 do {
575 lock_delay(&lda);
576 v = MTX_READ_VALUE(m);
577 owner = lv_mtx_owner(v);
578 } while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
579 KTR_STATE0(KTR_SCHED, "thread",
580 sched_tdname((struct thread *)tid),
581 "running");
582 continue;
583 }
584 #endif
585
586 ts = turnstile_trywait(&m->lock_object);
587 v = MTX_READ_VALUE(m);
588 retry_turnstile:
589
590 /*
591 * Check if the lock has been released while spinning for
592 * the turnstile chain lock.
593 */
594 if (v == MTX_UNOWNED) {
595 turnstile_cancel(ts);
596 continue;
597 }
598
599 #ifdef ADAPTIVE_MUTEXES
600 /*
601 * The current lock owner might have started executing
602 * on another CPU (or the lock could have changed
603 * owners) while we were waiting on the turnstile
604 * chain lock. If so, drop the turnstile lock and try
605 * again.
606 */
607 owner = lv_mtx_owner(v);
608 if (TD_IS_RUNNING(owner)) {
609 turnstile_cancel(ts);
610 continue;
611 }
612 #endif
613
614 /*
615 * If the mutex isn't already contested and a failure occurs
616 * setting the contested bit, the mutex was either released
617 * or the state of the MTX_RECURSED bit changed.
618 */
619 if ((v & MTX_CONTESTED) == 0 &&
620 !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_CONTESTED)) {
621 goto retry_turnstile;
622 }
623
624 /*
625 * We definitely must sleep for this lock.
626 */
627 mtx_assert(m, MA_NOTOWNED);
628
629 /*
630 * Block on the turnstile.
631 */
632 #ifdef KDTRACE_HOOKS
633 sleep_time -= lockstat_nsecs(&m->lock_object);
634 #endif
635 #ifndef ADAPTIVE_MUTEXES
636 owner = mtx_owner(m);
637 #endif
638 MPASS(owner == mtx_owner(m));
639 turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
640 #ifdef KDTRACE_HOOKS
641 sleep_time += lockstat_nsecs(&m->lock_object);
642 sleep_cnt++;
643 #endif
644 v = MTX_READ_VALUE(m);
645 }
646 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
647 if (__predict_true(!doing_lockprof))
648 return;
649 #endif
650 #ifdef KDTRACE_HOOKS
651 all_time += lockstat_nsecs(&m->lock_object);
652 if (sleep_time)
653 LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);
654
655 /*
656 * Only record the loops spinning and not sleeping.
657 */
658 if (lda.spin_cnt > sleep_cnt)
659 LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
660 out_lockstat:
661 #endif
662 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
663 waittime, file, line);
664 }
665
666 #ifdef SMP
667 /*
668 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
669 *
670 * This is only called if we need to actually spin for the lock. Recursion
671 * is handled inline.
672 */
673 #if LOCK_DEBUG > 0
674 void
675 _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
676 const char *file, int line)
677 #else
678 void
679 _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
680 #endif
681 {
682 struct mtx *m;
683 struct lock_delay_arg lda;
684 uintptr_t tid;
685 #ifdef LOCK_PROFILING
686 int contested = 0;
687 uint64_t waittime = 0;
688 #endif
689 #ifdef KDTRACE_HOOKS
690 int64_t spin_time = 0;
691 #endif
692 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
693 int doing_lockprof;
694 #endif
695
696 tid = (uintptr_t)curthread;
697 m = mtxlock2mtx(c);
698
699 #ifdef KDTRACE_HOOKS
700 if (LOCKSTAT_PROFILE_ENABLED(spin__acquire)) {
701 while (v == MTX_UNOWNED) {
702 if (_mtx_obtain_lock_fetch(m, &v, tid))
703 goto out_lockstat;
704 }
705 doing_lockprof = 1;
706 spin_time -= lockstat_nsecs(&m->lock_object);
707 }
708 #endif
709 #ifdef LOCK_PROFILING
710 doing_lockprof = 1;
711 #endif
712
713 if (__predict_false(v == MTX_UNOWNED))
714 v = MTX_READ_VALUE(m);
715
716 if (__predict_false(v == tid)) {
717 m->mtx_recurse++;
718 return;
719 }
720
721 if (SCHEDULER_STOPPED())
722 return;
723
724 lock_delay_arg_init(&lda, &mtx_spin_delay);
725
726 if (LOCK_LOG_TEST(&m->lock_object, opts))
727 CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
728 KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
729 "spinning", "lockname:\"%s\"", m->lock_object.lo_name);
730
731 #ifdef HWPMC_HOOKS
732 PMC_SOFT_CALL( , , lock, failed);
733 #endif
734 lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
735
736 for (;;) {
737 if (v == MTX_UNOWNED) {
738 if (_mtx_obtain_lock_fetch(m, &v, tid))
739 break;
740 continue;
741 }
742 /* Give interrupts a chance while we spin. */
743 spinlock_exit();
744 do {
745 if (__predict_true(lda.spin_cnt < 10000000)) {
746 lock_delay(&lda);
747 } else {
748 _mtx_lock_indefinite_check(m, &lda);
749 }
750 v = MTX_READ_VALUE(m);
751 } while (v != MTX_UNOWNED);
752 spinlock_enter();
753 }
754
755 if (LOCK_LOG_TEST(&m->lock_object, opts))
756 CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
757 KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
758 "running");
759
760 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
761 if (__predict_true(!doing_lockprof))
762 return;
763 #endif
764 #ifdef KDTRACE_HOOKS
765 spin_time += lockstat_nsecs(&m->lock_object);
766 if (lda.spin_cnt != 0)
767 LOCKSTAT_RECORD1(spin__spin, m, spin_time);
768 out_lockstat:
769 #endif
770 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
771 contested, waittime, file, line);
772 }
773 #endif /* SMP */
774
775 #ifdef INVARIANTS
776 static void
777 thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
778 {
779
780 KASSERT(m->mtx_lock != MTX_DESTROYED,
781 ("thread_lock() of destroyed mutex @ %s:%d", file, line));
782 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
783 ("thread_lock() of sleep mutex %s @ %s:%d",
784 m->lock_object.lo_name, file, line));
785 if (mtx_owned(m))
786 KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
787 ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
788 m->lock_object.lo_name, file, line));
789 WITNESS_CHECKORDER(&m->lock_object,
790 opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
791 }
792 #else
793 #define thread_lock_validate(m, opts, file, line) do { } while (0)
794 #endif
795
796 #ifndef LOCK_PROFILING
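/*
 * Fast path for thread_lock(): attempt to take the current td_lock with one
 * atomic operation and punt to thread_lock_flags_() if lockstat probes are
 * enabled, the lock is contended, or td_lock changed while we were acquiring.
 */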
797 #if LOCK_DEBUG > 0
798 void
799 _thread_lock(struct thread *td, int opts, const char *file, int line)
800 #else
801 void
802 _thread_lock(struct thread *td)
803 #endif
804 {
805 struct mtx *m;
806 uintptr_t tid, v;
807
808 tid = (uintptr_t)curthread;
809
810 if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire)))
811 goto slowpath_noirq;
812 spinlock_enter();
813 m = td->td_lock;
814 thread_lock_validate(m, 0, file, line);
815 v = MTX_READ_VALUE(m);
816 if (__predict_true(v == MTX_UNOWNED)) {
817 if (__predict_false(!_mtx_obtain_lock(m, tid)))
818 goto slowpath_unlocked;
819 } else if (v == tid) {
820 m->mtx_recurse++;
821 } else
822 goto slowpath_unlocked;
823 if (__predict_true(m == td->td_lock)) {
824 WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
825 return;
826 }
827 MPASS(m->mtx_recurse == 0);
828 _mtx_release_lock_quick(m);
829 slowpath_unlocked:
830 spinlock_exit();
831 slowpath_noirq:
832 #if LOCK_DEBUG > 0
833 thread_lock_flags_(td, opts, file, line);
834 #else
835 thread_lock_flags_(td, 0, 0, 0);
836 #endif
837 }
838 #endif
839
840 void
841 thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
842 {
843 struct mtx *m;
844 uintptr_t tid, v;
845 struct lock_delay_arg lda;
846 #ifdef LOCK_PROFILING
847 int contested = 0;
848 uint64_t waittime = 0;
849 #endif
850 #ifdef KDTRACE_HOOKS
851 int64_t spin_time = 0;
852 #endif
853 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
854 int doing_lockprof = 1;
855 #endif
856
857 tid = (uintptr_t)curthread;
858
859 if (SCHEDULER_STOPPED()) {
860 /*
861 * Ensure that spinlock sections are balanced even when the
862 * scheduler is stopped, since we may otherwise inadvertently
863 * re-enable interrupts while dumping core.
864 */
865 spinlock_enter();
866 return;
867 }
868
869 lock_delay_arg_init(&lda, &mtx_spin_delay);
870
871 #ifdef HWPMC_HOOKS
872 PMC_SOFT_CALL( , , lock, failed);
873 #endif
874
875 #ifdef LOCK_PROFILING
876 doing_lockprof = 1;
877 #elif defined(KDTRACE_HOOKS)
878 doing_lockprof = lockstat_enabled;
879 if (__predict_false(doing_lockprof))
880 spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
881 #endif
882 spinlock_enter();
883
884 for (;;) {
885 retry:
886 m = td->td_lock;
887 thread_lock_validate(m, opts, file, line);
888 v = MTX_READ_VALUE(m);
889 for (;;) {
890 if (v == MTX_UNOWNED) {
891 if (_mtx_obtain_lock_fetch(m, &v, tid))
892 break;
893 continue;
894 }
895 if (v == tid) {
896 m->mtx_recurse++;
897 MPASS(m == td->td_lock);
898 break;
899 }
900 lock_profile_obtain_lock_failed(&m->lock_object,
901 &contested, &waittime);
902 /* Give interrupts a chance while we spin. */
903 spinlock_exit();
904 do {
905 if (__predict_true(lda.spin_cnt < 10000000)) {
906 lock_delay(&lda);
907 } else {
908 _mtx_lock_indefinite_check(m, &lda);
909 }
910 if (m != td->td_lock) {
911 spinlock_enter();
912 goto retry;
913 }
914 v = MTX_READ_VALUE(m);
915 } while (v != MTX_UNOWNED);
916 spinlock_enter();
917 }
918 if (m == td->td_lock)
919 break;
920 MPASS(m->mtx_recurse == 0);
921 _mtx_release_lock_quick(m);
922 }
923 LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
924 line);
925 WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
926
927 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
928 if (__predict_true(!doing_lockprof))
929 return;
930 #endif
931 #ifdef KDTRACE_HOOKS
932 spin_time += lockstat_nsecs(&m->lock_object);
933 #endif
934 if (m->mtx_recurse == 0)
935 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
936 contested, waittime, file, line);
937 #ifdef KDTRACE_HOOKS
938 if (lda.spin_cnt != 0)
939 LOCKSTAT_RECORD1(thread__spin, m, spin_time);
940 #endif
941 }
942
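/*
 * Temporarily point td_lock at blocked_lock (which always appears owned, so
 * concurrent thread_lock() callers keep spinning), release the previously
 * held spin mutex and return it; thread_lock_unblock() later installs the
 * thread's new lock.
 */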
943 struct mtx *
944 thread_lock_block(struct thread *td)
945 {
946 struct mtx *lock;
947
948 THREAD_LOCK_ASSERT(td, MA_OWNED);
949 lock = td->td_lock;
950 td->td_lock = &blocked_lock;
951 mtx_unlock_spin(lock);
952
953 return (lock);
954 }
955
956 void
957 thread_lock_unblock(struct thread *td, struct mtx *new)
958 {
959 mtx_assert(new, MA_OWNED);
960 MPASS(td->td_lock == &blocked_lock);
961 atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
962 }
963
964 void
965 thread_lock_set(struct thread *td, struct mtx *new)
966 {
967 struct mtx *lock;
968
969 mtx_assert(new, MA_OWNED);
970 THREAD_LOCK_ASSERT(td, MA_OWNED);
971 lock = td->td_lock;
972 td->td_lock = new;
973 mtx_unlock_spin(lock);
974 }
975
976 /*
977 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
978 *
979 * We are only called here if the lock is recursed, contested (i.e. we
980 * need to wake up a blocked thread) or a lockstat probe is active.
981 */
982 #if LOCK_DEBUG > 0
983 void
984 __mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
985 const char *file, int line)
986 #else
987 void
988 __mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v)
989 #endif
990 {
991 struct mtx *m;
992 struct turnstile *ts;
993 uintptr_t tid;
994
995 if (SCHEDULER_STOPPED())
996 return;
997
998 tid = (uintptr_t)curthread;
999 m = mtxlock2mtx(c);
1000
1001 if (__predict_false(v == tid))
1002 v = MTX_READ_VALUE(m);
1003
1004 if (__predict_false(v & MTX_RECURSED)) {
1005 if (--(m->mtx_recurse) == 0)
1006 atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
1007 if (LOCK_LOG_TEST(&m->lock_object, opts))
1008 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
1009 return;
1010 }
1011
1012 LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
1013 if (v == tid && _mtx_release_lock(m, tid))
1014 return;
1015
1016 /*
1017 * We have to lock the chain before the turnstile so this turnstile
1018 * can be removed from the hash list if it is empty.
1019 */
1020 turnstile_chain_lock(&m->lock_object);
1021 _mtx_release_lock_quick(m);
1022 ts = turnstile_lookup(&m->lock_object);
1023 MPASS(ts != NULL);
1024 if (LOCK_LOG_TEST(&m->lock_object, opts))
1025 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
1026 turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
1027
1028 /*
1029 * This turnstile is now no longer associated with the mutex. We can
1030 * unlock the chain lock so a new turnstile may take its place.
1031 */
1032 turnstile_unpend(ts);
1033 turnstile_chain_unlock(&m->lock_object);
1034 }
1035
1036 /*
1037 * All the unlocking of MTX_SPIN locks is done inline.
1038 * See the __mtx_unlock_spin() macro for the details.
1039 */
1040
1041 /*
1042 * The backing function for the INVARIANTS-enabled mtx_assert()
1043 */
1044 #ifdef INVARIANT_SUPPORT
1045 void
1046 __mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
1047 {
1048 const struct mtx *m;
1049
1050 if (panicstr != NULL || dumping || SCHEDULER_STOPPED())
1051 return;
1052
1053 m = mtxlock2mtx(c);
1054
1055 switch (what) {
1056 case MA_OWNED:
1057 case MA_OWNED | MA_RECURSED:
1058 case MA_OWNED | MA_NOTRECURSED:
1059 if (!mtx_owned(m))
1060 panic("mutex %s not owned at %s:%d",
1061 m->lock_object.lo_name, file, line);
1062 if (mtx_recursed(m)) {
1063 if ((what & MA_NOTRECURSED) != 0)
1064 panic("mutex %s recursed at %s:%d",
1065 m->lock_object.lo_name, file, line);
1066 } else if ((what & MA_RECURSED) != 0) {
1067 panic("mutex %s unrecursed at %s:%d",
1068 m->lock_object.lo_name, file, line);
1069 }
1070 break;
1071 case MA_NOTOWNED:
1072 if (mtx_owned(m))
1073 panic("mutex %s owned at %s:%d",
1074 m->lock_object.lo_name, file, line);
1075 break;
1076 default:
1077 panic("unknown mtx_assert at %s:%d", file, line);
1078 }
1079 }
1080 #endif
1081
1082 /*
1083 * General init routine used by the MTX_SYSINIT() macro.
1084 */
1085 void
1086 mtx_sysinit(void *arg)
1087 {
1088 struct mtx_args *margs = arg;
1089
1090 mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
1091 margs->ma_opts);
1092 }
1093
1094 /*
1095 * Mutex initialization routine; initialize lock `m' with the options
1096 * contained in `opts' and name `name.' The optional
1097 * lock type `type' is used as a general lock category name for use with
1098 * witness.
1099 */
1100 void
1101 _mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
1102 {
1103 struct mtx *m;
1104 struct lock_class *class;
1105 int flags;
1106
1107 m = mtxlock2mtx(c);
1108
1109 MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
1110 MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
1111 ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
1112 ("%s: mtx_lock not aligned for %s: %p", __func__, name,
1113 &m->mtx_lock));
1114
1115 /* Determine lock class and lock flags. */
1116 if (opts & MTX_SPIN)
1117 class = &lock_class_mtx_spin;
1118 else
1119 class = &lock_class_mtx_sleep;
1120 flags = 0;
1121 if (opts & MTX_QUIET)
1122 flags |= LO_QUIET;
1123 if (opts & MTX_RECURSE)
1124 flags |= LO_RECURSABLE;
1125 if ((opts & MTX_NOWITNESS) == 0)
1126 flags |= LO_WITNESS;
1127 if (opts & MTX_DUPOK)
1128 flags |= LO_DUPOK;
1129 if (opts & MTX_NOPROFILE)
1130 flags |= LO_NOPROFILE;
1131 if (opts & MTX_NEW)
1132 flags |= LO_NEW;
1133
1134 /* Initialize mutex. */
1135 lock_init(&m->lock_object, class, name, type, flags);
1136
1137 m->mtx_lock = MTX_UNOWNED;
1138 m->mtx_recurse = 0;
1139 }
1140
1141 /*
1142 * Destroy lock `m'. We don't allow MTX_QUIET to be passed in as a flag
1143 * here because, if the corresponding mtx_init() was called with MTX_QUIET
1144 * set, then that option is already recorded in the
1145 * mutex's flags.
1146 */
1147 void
1148 _mtx_destroy(volatile uintptr_t *c)
1149 {
1150 struct mtx *m;
1151
1152 m = mtxlock2mtx(c);
1153
1154 if (!mtx_owned(m))
1155 MPASS(mtx_unowned(m));
1156 else {
1157 MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
1158
1159 /* Perform the non-mtx related part of mtx_unlock_spin(). */
1160 if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
1161 spinlock_exit();
1162 else
1163 TD_LOCKS_DEC(curthread);
1164
1165 lock_profile_release_lock(&m->lock_object);
1166 /* Tell witness this isn't locked to make it happy. */
1167 WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
1168 __LINE__);
1169 }
1170
1171 m->mtx_lock = MTX_DESTROYED;
1172 lock_destroy(&m->lock_object);
1173 }
1174
1175 /*
1176 * Initialize the mutex code and system mutexes. This is called from the MD
1177 * startup code prior to mi_startup(). The per-CPU data space needs to be
1178 * set up before this is called.
1179 */
1180 void
1181 mutex_init(void)
1182 {
1183
1184 /* Setup turnstiles so that sleep mutexes work. */
1185 init_turnstiles();
1186
1187 /*
1188 * Initialize mutexes.
1189 */
1190 mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
1191 mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
1192 blocked_lock.mtx_lock = 0xdeadc0de; /* Always blocked. */
1193 mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
1194 mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
1195 mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
1196 mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
1197 mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
1198 mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
1199 mtx_lock(&Giant);
1200 }
1201
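/*
 * Invoked from spin loops once the iteration count suggests the lock may be
 * held indefinitely: keep delaying while the count stays reasonable (or the
 * debugger/panic is active), otherwise report the current owner and panic.
 */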
1202 static void __noinline
1203 _mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap)
1204 {
1205 struct thread *td;
1206
1207 ldap->spin_cnt++;
1208 if (ldap->spin_cnt < 60000000 || kdb_active || panicstr != NULL)
1209 cpu_lock_delay();
1210 else {
1211 td = mtx_owner(m);
1212
1213 /* If the mutex is unlocked, try again. */
1214 if (td == NULL)
1215 return;
1216
1217 printf( "spin lock %p (%s) held by %p (tid %d) too long\n",
1218 m, m->lock_object.lo_name, td, td->td_tid);
1219 #ifdef WITNESS
1220 witness_display_spinlock(&m->lock_object, td, printf);
1221 #endif
1222 panic("spin lock held too long");
1223 }
1224 cpu_spinwait();
1225 }
1226
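/*
 * Busy-wait until the given spin mutex is observed unowned, without ever
 * acquiring it.
 */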
1227 void
1228 mtx_spin_wait_unlocked(struct mtx *m)
1229 {
1230 struct lock_delay_arg lda;
1231
1232 KASSERT(m->mtx_lock != MTX_DESTROYED,
1233 ("%s() of destroyed mutex %p", __func__, m));
1234 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
1235 ("%s() of sleep mutex %p (%s)", __func__, m,
1236 m->lock_object.lo_name));
1237 KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m,
1238 m->lock_object.lo_name));
1239
1240 lda.spin_cnt = 0;
1241
1242 while (atomic_load_acq_ptr(&m->mtx_lock) != MTX_UNOWNED) {
1243 if (__predict_true(lda.spin_cnt < 10000000)) {
1244 cpu_spinwait();
1245 lda.spin_cnt++;
1246 } else {
1247 _mtx_lock_indefinite_check(m, &lda);
1248 }
1249 }
1250 }
1251
1252 #ifdef DDB
1253 void
1254 db_show_mtx(const struct lock_object *lock)
1255 {
1256 struct thread *td;
1257 const struct mtx *m;
1258
1259 m = (const struct mtx *)lock;
1260
1261 db_printf(" flags: {");
1262 if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
1263 db_printf("SPIN");
1264 else
1265 db_printf("DEF");
1266 if (m->lock_object.lo_flags & LO_RECURSABLE)
1267 db_printf(", RECURSE");
1268 if (m->lock_object.lo_flags & LO_DUPOK)
1269 db_printf(", DUPOK");
1270 db_printf("}\n");
1271 db_printf(" state: {");
1272 if (mtx_unowned(m))
1273 db_printf("UNOWNED");
1274 else if (mtx_destroyed(m))
1275 db_printf("DESTROYED");
1276 else {
1277 db_printf("OWNED");
1278 if (m->mtx_lock & MTX_CONTESTED)
1279 db_printf(", CONTESTED");
1280 if (m->mtx_lock & MTX_RECURSED)
1281 db_printf(", RECURSED");
1282 }
1283 db_printf("}\n");
1284 if (!mtx_unowned(m) && !mtx_destroyed(m)) {
1285 td = mtx_owner(m);
1286 db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
1287 td->td_tid, td->td_proc->p_pid, td->td_name);
1288 if (mtx_recursed(m))
1289 db_printf(" recursed: %d\n", m->mtx_recurse);
1290 }
1291 }
1292 #endif