FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_mutex.c
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

#define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))

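/*
 * Illustrative sketch (not compiled; everything except mtx_owner()'s
 * inputs is hypothetical): the lock word holds the owning thread pointer
 * with state flags packed into the low bits, which is why mtx_owner()
 * masks with ~MTX_FLAGMASK before casting.
 */
#if 0
static struct thread *
example_decode_owner(struct mtx *m)
{
        uintptr_t v = m->mtx_lock;      /* owner pointer | flag bits */

        if (v == MTX_UNOWNED)
                return (NULL);          /* nobody holds the lock */
        return ((struct thread *)(v & ~MTX_FLAGMASK));
}
#endif
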
#ifdef DDB
static void	db_show_mtx(struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, int how);
static void	lock_spin(struct lock_object *lock, int how);
static int	unlock_mtx(struct lock_object *lock);
static int	unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
        .lc_name = "sleep mutex",
        .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
#ifdef DDB
        .lc_ddb_show = db_show_mtx,
#endif
        .lc_lock = lock_mtx,
        .lc_unlock = unlock_mtx,
};
struct lock_class lock_class_mtx_spin = {
        .lc_name = "spin mutex",
        .lc_flags = LC_SPINLOCK | LC_RECURSABLE,
#ifdef DDB
        .lc_ddb_show = db_show_mtx,
#endif
        .lc_lock = lock_spin,
        .lc_unlock = unlock_spin,
};

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;

#ifdef LOCK_PROFILING
static inline void lock_profile_init(void)
{
        int i;

        /* Initialize the mutex profiling locks */
        for (i = 0; i < LPROF_LOCK_SIZE; i++) {
                mtx_init(&lprof_locks[i], "mprof lock",
                    NULL, MTX_SPIN|MTX_QUIET|MTX_NOPROFILE);
        }
}
#else
static inline void lock_profile_init(void) {;}
#endif

void
lock_mtx(struct lock_object *lock, int how)
{

        mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, int how)
{

        panic("spin locks can only use msleep_spin");
}

int
unlock_mtx(struct lock_object *lock)
{
        struct mtx *m;

        m = (struct mtx *)lock;
        mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
        mtx_unlock(m);
        return (0);
}

int
unlock_spin(struct lock_object *lock)
{

        panic("spin locks can only use msleep_spin");
}

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
            ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
            file, line));
        WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line);

        _get_sleep_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
        curthread->td_locks++;
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
            ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
            file, line));
        curthread->td_locks--;
        WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);

        if (m->mtx_recurse == 0)
                lock_profile_release_lock(&m->lock_object);
        _rel_sleep_lock(m, curthread, opts, file, line);
}

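/*
 * Illustrative sketch (not compiled; the softc structure and names are
 * hypothetical): a typical consumer of the sleep mutex API wrapped by
 * the functions above.  mtx_lock()/mtx_unlock() normally expand to the
 * inlined __mtx_* macros; modules fall back to _mtx_*_flags().
 */
#if 0
struct example_softc {
        struct mtx      sc_mtx;
        int             sc_opens;
};

static void
example_open(struct example_softc *sc)
{

        mtx_lock(&sc->sc_mtx);          /* blocks on contention */
        sc->sc_opens++;
        mtx_unlock(&sc->sc_mtx);
}
#endif
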
void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
            ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
            m->lock_object.lo_name, file, line));
        if (mtx_owned(m))
                KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
                    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
                    m->lock_object.lo_name, file, line));
        WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line);
        _get_spin_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
            ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
            m->lock_object.lo_name, file, line));
        WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);

        _rel_spin_lock(m);
}

/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
        int rval, contested = 0;
        uint64_t waittime = 0;

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
            ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
            file, line));

        if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) {
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                rval = 1;
        } else
                rval = _obtain_lock(m, (uintptr_t)curthread);

        LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
        if (rval) {
                WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                curthread->td_locks++;
                if (m->mtx_recurse == 0)
                        lock_profile_obtain_lock_success(&m->lock_object,
                            contested, waittime, file, line);
        }

        return (rval);
}

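/*
 * Illustrative sketch (not compiled; names are hypothetical): the usual
 * mtx_trylock() pattern for avoiding a lock-order reversal, falling back
 * to a blocking acquire in the safe order when the try fails.
 */
#if 0
static void
example_lock_pair(struct mtx *a, struct mtx *b)
{

        mtx_lock(a);
        if (!mtx_trylock(b)) {
                /* Back off and take the locks in the safe order. */
                mtx_unlock(a);
                mtx_lock(b);
                mtx_lock(a);
        }
        /* Both locks are held here. */
}
#endif
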
/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
        struct turnstile *ts;
#ifdef ADAPTIVE_MUTEXES
        volatile struct thread *owner;
#endif
#ifdef KTR
        int cont_logged = 0;
#endif
        int contested = 0;
        uint64_t waittime = 0;
        uintptr_t v;

        if (mtx_owned(m)) {
                KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
                    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
                    m->lock_object.lo_name, file, line));
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->lock_object, opts))
                        CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
                return;
        }

        lock_profile_obtain_lock_failed(&m->lock_object,
            &contested, &waittime);
        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR4(KTR_LOCK,
                    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
                    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

        while (!_obtain_lock(m, tid)) {
#ifdef ADAPTIVE_MUTEXES
                /*
                 * If the owner is running on another CPU, spin until the
                 * owner stops running or the state of the lock changes.
                 */
                v = m->mtx_lock;
                if (v != MTX_UNOWNED) {
                        owner = (struct thread *)(v & ~MTX_FLAGMASK);
#ifdef ADAPTIVE_GIANT
                        if (TD_IS_RUNNING(owner)) {
#else
                        if (m != &Giant && TD_IS_RUNNING(owner)) {
#endif
                                if (LOCK_LOG_TEST(&m->lock_object, 0))
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, m, owner);
                                while (mtx_owner(m) == owner &&
                                    TD_IS_RUNNING(owner))
                                        cpu_spinwait();
                                continue;
                        }
                }
#endif

                ts = turnstile_trywait(&m->lock_object);
                v = m->mtx_lock;

                /*
                 * Check if the lock has been released while spinning for
                 * the turnstile chain lock.
                 */
                if (v == MTX_UNOWNED) {
                        turnstile_cancel(ts);
                        cpu_spinwait();
                        continue;
                }

                MPASS(v != MTX_CONTESTED);

#ifdef ADAPTIVE_MUTEXES
                /*
                 * If the current owner of the lock is executing on another
                 * CPU, quit the hard path and go back to spinning instead.
                 */
                owner = (struct thread *)(v & ~MTX_FLAGMASK);
#ifdef ADAPTIVE_GIANT
                if (TD_IS_RUNNING(owner)) {
#else
                if (m != &Giant && TD_IS_RUNNING(owner)) {
#endif
                        turnstile_cancel(ts);
                        cpu_spinwait();
                        continue;
                }
#endif

                /*
                 * If the mutex isn't already contested and a failure occurs
                 * setting the contested bit, the mutex was either released
                 * or the state of the MTX_RECURSED bit changed.
                 */
                if ((v & MTX_CONTESTED) == 0 &&
                    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
                        turnstile_cancel(ts);
                        cpu_spinwait();
                        continue;
                }

                /*
                 * We definitely must sleep for this lock.
                 */
                mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
                if (!cont_logged) {
                        CTR6(KTR_CONTENTION,
                            "contention: %p at %s:%d wants %s, taken by %s:%d",
                            (void *)tid, file, line, m->lock_object.lo_name,
                            WITNESS_FILE(&m->lock_object),
                            WITNESS_LINE(&m->lock_object));
                        cont_logged = 1;
                }
#endif

                /*
                 * Block on the turnstile.
                 */
                turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
        }
#ifdef KTR
        if (cont_logged) {
                CTR4(KTR_CONTENTION,
                    "contention end: %s acquired by %p at %s:%d",
                    m->lock_object.lo_name, (void *)tid, file, line);
        }
#endif
        lock_profile_obtain_lock_success(&m->lock_object, contested,
            waittime, (file), (line));
}

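/*
 * Illustrative sketch (not compiled): the uncontested fast path that
 * _mtx_lock_sleep() backs up.  In this era of the tree, _obtain_lock()
 * is essentially one compare-and-set of the lock word from MTX_UNOWNED
 * to the acquiring thread pointer, with acquire semantics; only when it
 * fails does the slow path above run.
 */
#if 0
static int
example_obtain_lock(struct mtx *m, uintptr_t tid)
{

        /* Succeeds iff the lock word still reads MTX_UNOWNED. */
        return (atomic_cmpset_acq_ptr(&m->mtx_lock, MTX_UNOWNED, tid));
}
#endif
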
static void
_mtx_lock_spin_failed(struct mtx *m)
{
        struct thread *td;

        td = mtx_owner(m);

        /* If the mutex is unlocked, try again. */
        if (td == NULL)
                return;

        printf("spin lock %p (%s) held by %p (tid %d) too long\n",
            m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
        witness_display_spinlock(&m->lock_object, td);
#endif
        panic("spin lock held too long");
}

#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
        int i = 0, contested = 0;
        uint64_t waittime = 0;

        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

        lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
        while (!_obtain_lock(m, tid)) {

                /* Give interrupts a chance while we spin. */
                spinlock_exit();
                while (m->mtx_lock != MTX_UNOWNED) {
                        if (i++ < 10000000) {
                                cpu_spinwait();
                                continue;
                        }
                        if (i < 60000000 || kdb_active || panicstr != NULL)
                                DELAY(1);
                        else
                                _mtx_lock_spin_failed(m);
                        cpu_spinwait();
                }
                spinlock_enter();
        }

        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

        lock_profile_obtain_lock_success(&m->lock_object, contested,
            waittime, (file), (line));
}
#endif /* SMP */

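/*
 * Illustrative sketch (not compiled; the softc and field names are
 * hypothetical): spin mutexes protect data shared with interrupt
 * handlers.  Acquisition disables interrupts on the local CPU (via
 * spinlock_enter() in the inline fast path), so critical sections must
 * stay short and must never sleep.
 */
#if 0
static void
example_intr_event(struct example_softc *sc)
{

        mtx_lock_spin(&sc->sc_intr_mtx);        /* interrupts now disabled */
        sc->sc_opens++;
        mtx_unlock_spin(&sc->sc_intr_mtx);
}
#endif
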
void
_thread_lock_flags(struct thread *td, int opts, const char *file, int line)
{
        struct mtx *m;
        uintptr_t tid;
        int i, contested;
        uint64_t waittime;

        contested = i = 0;
        waittime = 0;
        tid = (uintptr_t)curthread;
        for (;;) {
retry:
                spinlock_enter();
                m = td->td_lock;
                KASSERT(m->mtx_lock != MTX_DESTROYED,
                    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
                KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
                    ("thread_lock() of sleep mutex %s @ %s:%d",
                    m->lock_object.lo_name, file, line));
                if (mtx_owned(m))
                        KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
                            ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
                            m->lock_object.lo_name, file, line));
                WITNESS_CHECKORDER(&m->lock_object,
                    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line);
                while (!_obtain_lock(m, tid)) {
                        if (m->mtx_lock == tid) {
                                m->mtx_recurse++;
                                break;
                        }
                        lock_profile_obtain_lock_failed(&m->lock_object,
                            &contested, &waittime);
                        /* Give interrupts a chance while we spin. */
                        spinlock_exit();
                        while (m->mtx_lock != MTX_UNOWNED) {
                                if (i++ < 10000000)
                                        cpu_spinwait();
                                else if (i < 60000000 ||
                                    kdb_active || panicstr != NULL)
                                        DELAY(1);
                                else
                                        _mtx_lock_spin_failed(m);
                                cpu_spinwait();
                                if (m != td->td_lock)
                                        goto retry;
                        }
                        spinlock_enter();
                }
                if (m == td->td_lock)
                        break;
                _rel_spin_lock(m);      /* does spinlock_exit() */
        }
        lock_profile_obtain_lock_success(&m->lock_object, contested,
            waittime, (file), (line));
        LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

struct mtx *
thread_lock_block(struct thread *td)
{
        struct mtx *lock;

        spinlock_enter();
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        lock = td->td_lock;
        td->td_lock = &blocked_lock;
        mtx_unlock_spin(lock);

        return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{

        mtx_assert(new, MA_OWNED);
        MPASS(td->td_lock == &blocked_lock);
        atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
        spinlock_exit();
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
        struct mtx *lock;

        mtx_assert(new, MA_OWNED);
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        lock = td->td_lock;
        td->td_lock = new;
        mtx_unlock_spin(lock);
}

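/*
 * Illustrative sketch (not compiled; the function and lock names are
 * hypothetical): how a scheduler might migrate a thread between lock
 * containers.  thread_lock_block() parks td_lock on the permanently
 * locked blocked_lock, so concurrent thread_lock() callers spin in the
 * retry loop above until thread_lock_unblock() publishes the new lock.
 */
#if 0
static void
example_migrate(struct thread *td, struct mtx *new_runq_lock)
{
        struct mtx *old;

        /* Caller already holds both the thread lock and new_runq_lock. */
        old = thread_lock_block(td);    /* td_lock = &blocked_lock */
        /* ... move td to the run queue protected by new_runq_lock ... */
        thread_lock_unblock(td, new_runq_lock);
        (void)old;
}
#endif
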
/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
        struct turnstile *ts;

        if (mtx_recursed(m)) {
                if (--(m->mtx_recurse) == 0)
                        atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->lock_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
                return;
        }

        /*
         * We have to lock the chain before the turnstile so this turnstile
         * can be removed from the hash list if it is empty.
         */
        turnstile_chain_lock(&m->lock_object);
        ts = turnstile_lookup(&m->lock_object);
        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

        MPASS(ts != NULL);
        turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
        _release_lock_quick(m);
        /*
         * This turnstile is no longer associated with the mutex.  We can
         * unlock the chain lock so a new turnstile may take its place.
         */
        turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
        turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert().
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

        if (panicstr != NULL || dumping)
                return;
        switch (what) {
        case MA_OWNED:
        case MA_OWNED | MA_RECURSED:
        case MA_OWNED | MA_NOTRECURSED:
                if (!mtx_owned(m))
                        panic("mutex %s not owned at %s:%d",
                            m->lock_object.lo_name, file, line);
                if (mtx_recursed(m)) {
                        if ((what & MA_NOTRECURSED) != 0)
                                panic("mutex %s recursed at %s:%d",
                                    m->lock_object.lo_name, file, line);
                } else if ((what & MA_RECURSED) != 0) {
                        panic("mutex %s unrecursed at %s:%d",
                            m->lock_object.lo_name, file, line);
                }
                break;
        case MA_NOTOWNED:
                if (mtx_owned(m))
                        panic("mutex %s owned at %s:%d",
                            m->lock_object.lo_name, file, line);
                break;
        default:
                panic("unknown mtx_assert at %s:%d", file, line);
        }
}
#endif

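/*
 * Illustrative sketch (not compiled; names are hypothetical): callers
 * document and enforce their locking protocol with mtx_assert(), which
 * compiles away entirely in kernels built without INVARIANTS.
 */
#if 0
static void
example_modify_locked(struct example_softc *sc)
{

        mtx_assert(&sc->sc_mtx, MA_OWNED);      /* caller must hold sc_mtx */
        sc->sc_opens++;
}
#endif
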
/*
 * The MUTEX_DEBUG-enabled mtx_validate().
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
        /*
         * Can't call kernacc() from early init386(), especially when
         * initializing Giant mutex, because some stuff in kernacc()
         * requires Giant itself.
         */
        if (!cold)
                if (!kernacc((caddr_t)m, sizeof(m),
                    VM_PROT_READ | VM_PROT_WRITE))
                        panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
        struct mtx_args *margs = arg;

        mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

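/*
 * Illustrative sketch (not compiled; the mutex and its name are
 * hypothetical): the MTX_SYSINIT() macro arranges for mtx_sysinit() to
 * run during boot, so file-scope mutexes are initialized before first
 * use.
 */
#if 0
static struct mtx example_global_mtx;
MTX_SYSINIT(example_global, &example_global_mtx, "example global", MTX_DEF);
#endif
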
/*
 * Mutex initialization routine; initialize lock `m' with the lock class
 * and options contained in `opts' and name `name.'  The optional lock
 * type `type' is used as a general lock category name for use with
 * witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
        struct lock_class *class;
        int flags;

        MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
            MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);

#ifdef MUTEX_DEBUG
        /* Diagnostic and error correction */
        mtx_validate(m);
#endif

        /* Determine lock class and lock flags. */
        if (opts & MTX_SPIN)
                class = &lock_class_mtx_spin;
        else
                class = &lock_class_mtx_sleep;
        flags = 0;
        if (opts & MTX_QUIET)
                flags |= LO_QUIET;
        if (opts & MTX_RECURSE)
                flags |= LO_RECURSABLE;
        if ((opts & MTX_NOWITNESS) == 0)
                flags |= LO_WITNESS;
        if (opts & MTX_DUPOK)
                flags |= LO_DUPOK;
        if (opts & MTX_NOPROFILE)
                flags |= LO_NOPROFILE;

        /* Initialize mutex. */
        m->mtx_lock = MTX_UNOWNED;
        m->mtx_recurse = 0;

        lock_init(&m->lock_object, class, name, type, flags);
}

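/*
 * Illustrative sketch (not compiled; the mutexes and names are
 * hypothetical): common mtx_init() invocations.  The `type' argument
 * groups locks into one witness order class; passing NULL makes witness
 * fall back to `name'.
 */
#if 0
static struct mtx example_sleep_mtx, example_spin_mtx;

static void
example_setup(void)
{

        /* Default (sleep) mutex, recursion allowed. */
        mtx_init(&example_sleep_mtx, "example sleep", NULL,
            MTX_DEF | MTX_RECURSE);
        /* Spin mutex sharing one witness class with other "example spin"
           locks. */
        mtx_init(&example_spin_mtx, "example spin", "example spin", MTX_SPIN);
}
#endif
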
/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a
 * flag here because if the corresponding mtx_init() was called with
 * MTX_QUIET set, then it will already be set in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

        if (!mtx_owned(m))
                MPASS(mtx_unowned(m));
        else {
                MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

                /* Perform the non-mtx related part of mtx_unlock_spin(). */
                if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
                        spinlock_exit();
                else
                        curthread->td_locks--;

                /* Tell witness this isn't locked to make it happy. */
                WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
                    __LINE__);
        }

        m->mtx_lock = MTX_DESTROYED;
        lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

        /* Setup turnstiles so that sleep mutexes work. */
        init_turnstiles();

        /*
         * Initialize mutexes.
         */
        mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
        mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
        blocked_lock.mtx_lock = 0xdeadc0de;     /* Always blocked. */
        mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
        mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
        mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
        mtx_lock(&Giant);

        lock_profile_init();
}

#ifdef DDB
void
db_show_mtx(struct lock_object *lock)
{
        struct thread *td;
        struct mtx *m;

        m = (struct mtx *)lock;

        db_printf(" flags: {");
        if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
                db_printf("SPIN");
        else
                db_printf("DEF");
        if (m->lock_object.lo_flags & LO_RECURSABLE)
                db_printf(", RECURSE");
        if (m->lock_object.lo_flags & LO_DUPOK)
                db_printf(", DUPOK");
        db_printf("}\n");
        db_printf(" state: {");
        if (mtx_unowned(m))
                db_printf("UNOWNED");
        else if (mtx_destroyed(m))
                db_printf("DESTROYED");
        else {
                db_printf("OWNED");
                if (m->mtx_lock & MTX_CONTESTED)
                        db_printf(", CONTESTED");
                if (m->mtx_lock & MTX_RECURSED)
                        db_printf(", RECURSED");
        }
        db_printf("}\n");
        if (!mtx_unowned(m) && !mtx_destroyed(m)) {
                td = mtx_owner(m);
                db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
                if (mtx_recursed(m))
                        db_printf(" recursed: %d\n", m->mtx_recurse);
        }
}
#endif