sys/kern/kern_mutex.c
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.4/sys/kern/kern_mutex.c 235502 2012-05-16 09:03:29Z avg $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_kdtrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

#define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))

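/*
 * Illustrative sketch (not part of the original file): the lock word
 * packs the owning thread pointer together with the low flag bits,
 * which is why mtx_owner() masks with ~MTX_FLAGMASK.  Roughly:
 *
 *	uintptr_t v = m->mtx_lock;
 *	struct thread *owner = (struct thread *)(v & ~MTX_FLAGMASK);
 *	int recursed = (v & MTX_RECURSED) != 0;
 *	int contested = (v & MTX_CONTESTED) != 0;
 */
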
static void	assert_mtx(struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, int how);
static void	lock_spin(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(struct lock_object *lock, struct thread **owner);
#endif
static int	unlock_mtx(struct lock_object *lock);
static int	unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
        .lc_name = "sleep mutex",
        .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
        .lc_assert = assert_mtx,
#ifdef DDB
        .lc_ddb_show = db_show_mtx,
#endif
        .lc_lock = lock_mtx,
        .lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
        .lc_name = "spin mutex",
        .lc_flags = LC_SPINLOCK | LC_RECURSABLE,
        .lc_assert = assert_mtx,
#ifdef DDB
        .lc_ddb_show = db_show_mtx,
#endif
        .lc_lock = lock_spin,
        .lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_mtx,
#endif
};

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;

void
assert_mtx(struct lock_object *lock, int what)
{

        mtx_assert((struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, int how)
{

        mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, int how)
{

        panic("spin locks can only use msleep_spin");
}

int
unlock_mtx(struct lock_object *lock)
{
        struct mtx *m;

        m = (struct mtx *)lock;
        mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
        mtx_unlock(m);
        return (0);
}

int
unlock_spin(struct lock_object *lock)
{

        panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(struct lock_object *lock, struct thread **owner)
{
        struct mtx *m = (struct mtx *)lock;

        *owner = mtx_owner(m);
        return (mtx_unowned(m) == 0);
}
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;
        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
            ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
            file, line));
        WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line, NULL);

        _get_sleep_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
        curthread->td_locks++;
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;
        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
            ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
            file, line));
        curthread->td_locks--;
        WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);

        if (m->mtx_recurse == 0)
                LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m);
        _rel_sleep_lock(m, curthread, opts, file, line);
}

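/*
 * Usage sketch (illustrative; "sc" and "sc_mtx" are hypothetical driver
 * softc names): per the comment above, a module's mtx_lock() and
 * mtx_unlock() calls resolve to the _mtx_lock_flags() and
 * _mtx_unlock_flags() functions:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_opened++;	... critical section ...
 *	mtx_unlock(&sc->sc_mtx);
 */
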
void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;
        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
            ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
            m->lock_object.lo_name, file, line));
        if (mtx_owned(m))
                KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
                    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
                    m->lock_object.lo_name, file, line));
        WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line, NULL);
        _get_spin_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;
        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
            ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
            m->lock_object.lo_name, file, line));
        WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);

        _rel_spin_lock(m);
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        int rval;

        if (SCHEDULER_STOPPED())
                return (1);

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
            ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
            file, line));

        if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) {
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                rval = 1;
        } else
                rval = _obtain_lock(m, (uintptr_t)curthread);

        LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
        if (rval) {
                WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                curthread->td_locks++;
                if (m->mtx_recurse == 0)
                        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
                            m, contested, waittime, file, line);
        }

        return (rval);
}

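/*
 * Usage sketch (illustrative, hypothetical names): mtx_trylock() suits
 * paths that must not block, returning an error instead of sleeping:
 *
 *	if (!mtx_trylock(&sc->sc_mtx))
 *		return (EBUSY);
 *	... critical section ...
 *	mtx_unlock(&sc->sc_mtx);
 */
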
/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
        struct turnstile *ts;
        uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
        volatile struct thread *owner;
#endif
#ifdef KTR
        int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
        int contested = 0;
        uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
        uint64_t spin_cnt = 0;
        uint64_t sleep_cnt = 0;
        int64_t sleep_time = 0;
#endif

        if (SCHEDULER_STOPPED())
                return;

        if (mtx_owned(m)) {
                KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
                    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
                    m->lock_object.lo_name, file, line));
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->lock_object, opts))
                        CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
                return;
        }

        lock_profile_obtain_lock_failed(&m->lock_object,
            &contested, &waittime);
        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR4(KTR_LOCK,
                    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
                    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

        while (!_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
                spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
                /*
                 * If the owner is running on another CPU, spin until the
                 * owner stops running or the state of the lock changes.
                 */
                v = m->mtx_lock;
                if (v != MTX_UNOWNED) {
                        owner = (struct thread *)(v & ~MTX_FLAGMASK);
                        if (TD_IS_RUNNING(owner)) {
                                if (LOCK_LOG_TEST(&m->lock_object, 0))
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, m, owner);
                                while (mtx_owner(m) == owner &&
                                    TD_IS_RUNNING(owner)) {
                                        cpu_spinwait();
#ifdef KDTRACE_HOOKS
                                        spin_cnt++;
#endif
                                }
                                continue;
                        }
                }
#endif

                ts = turnstile_trywait(&m->lock_object);
                v = m->mtx_lock;

                /*
                 * Check if the lock has been released while spinning for
                 * the turnstile chain lock.
                 */
                if (v == MTX_UNOWNED) {
                        turnstile_cancel(ts);
                        continue;
                }

#ifdef ADAPTIVE_MUTEXES
                /*
                 * The current lock owner might have started executing
                 * on another CPU (or the lock could have changed
                 * owners) while we were waiting on the turnstile
                 * chain lock.  If so, drop the turnstile lock and try
                 * again.
                 */
                owner = (struct thread *)(v & ~MTX_FLAGMASK);
                if (TD_IS_RUNNING(owner)) {
                        turnstile_cancel(ts);
                        continue;
                }
#endif

                /*
                 * If the mutex isn't already contested and a failure occurs
                 * setting the contested bit, the mutex was either released
                 * or the state of the MTX_RECURSED bit changed.
                 */
                if ((v & MTX_CONTESTED) == 0 &&
                    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
                        turnstile_cancel(ts);
                        continue;
                }

                /*
                 * We definitely must sleep for this lock.
                 */
                mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
                if (!cont_logged) {
                        CTR6(KTR_CONTENTION,
                            "contention: %p at %s:%d wants %s, taken by %s:%d",
                            (void *)tid, file, line, m->lock_object.lo_name,
                            WITNESS_FILE(&m->lock_object),
                            WITNESS_LINE(&m->lock_object));
                        cont_logged = 1;
                }
#endif

                /*
                 * Block on the turnstile.
                 */
#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs();
#endif
                turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs();
                sleep_cnt++;
#endif
        }
#ifdef KTR
        if (cont_logged) {
                CTR4(KTR_CONTENTION,
                    "contention end: %s acquired by %p at %s:%d",
                    m->lock_object.lo_name, (void *)tid, file, line);
        }
#endif
        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested,
            waittime, file, line);
#ifdef KDTRACE_HOOKS
        if (sleep_time)
                LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time);

        /*
         * Only record the loops spinning and not sleeping.
         */
        if (spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (spin_cnt - sleep_cnt));
#endif
}

static void
_mtx_lock_spin_failed(struct mtx *m)
{
        struct thread *td;

        td = mtx_owner(m);

        /* If the mutex is unlocked, try again. */
        if (td == NULL)
                return;

        printf("spin lock %p (%s) held by %p (tid %d) too long\n",
            m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
        witness_display_spinlock(&m->lock_object, td, printf);
#endif
        panic("spin lock held too long");
}

#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
        int i = 0;
#ifdef LOCK_PROFILING
        int contested = 0;
        uint64_t waittime = 0;
#endif

        if (SCHEDULER_STOPPED())
                return;

        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

        lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
        while (!_obtain_lock(m, tid)) {

                /* Give interrupts a chance while we spin. */
                spinlock_exit();
                while (m->mtx_lock != MTX_UNOWNED) {
                        if (i++ < 10000000) {
                                cpu_spinwait();
                                continue;
                        }
                        if (i < 60000000 || kdb_active || panicstr != NULL)
                                DELAY(1);
                        else
                                _mtx_lock_spin_failed(m);
                        cpu_spinwait();
                }
                spinlock_enter();
        }

        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
            contested, waittime, (file), (line));
        LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, i);
}
#endif /* SMP */

void
_thread_lock_flags(struct thread *td, int opts, const char *file, int line)
{
        struct mtx *m;
        uintptr_t tid;
        int i;
#ifdef LOCK_PROFILING
        int contested = 0;
        uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
        uint64_t spin_cnt = 0;
#endif

        i = 0;
        tid = (uintptr_t)curthread;

        if (SCHEDULER_STOPPED())
                return;

        for (;;) {
retry:
                spinlock_enter();
                m = td->td_lock;
                KASSERT(m->mtx_lock != MTX_DESTROYED,
                    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
                KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
                    ("thread_lock() of sleep mutex %s @ %s:%d",
                    m->lock_object.lo_name, file, line));
                if (mtx_owned(m))
                        KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
                            ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
                            m->lock_object.lo_name, file, line));
                WITNESS_CHECKORDER(&m->lock_object,
                    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
                while (!_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
                        spin_cnt++;
#endif
                        if (m->mtx_lock == tid) {
                                m->mtx_recurse++;
                                break;
                        }
                        lock_profile_obtain_lock_failed(&m->lock_object,
                            &contested, &waittime);
                        /* Give interrupts a chance while we spin. */
                        spinlock_exit();
                        while (m->mtx_lock != MTX_UNOWNED) {
                                if (i++ < 10000000)
                                        cpu_spinwait();
                                else if (i < 60000000 ||
                                    kdb_active || panicstr != NULL)
                                        DELAY(1);
                                else
                                        _mtx_lock_spin_failed(m);
                                cpu_spinwait();
                                if (m != td->td_lock)
                                        goto retry;
                        }
                        spinlock_enter();
                }
                if (m == td->td_lock)
                        break;
                _rel_spin_lock(m);	/* does spinlock_exit() */
#ifdef KDTRACE_HOOKS
                spin_cnt++;
#endif
        }
        if (m->mtx_recurse == 0)
                LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
                    m, contested, waittime, (file), (line));
        LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
        LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_cnt);
}

struct mtx *
thread_lock_block(struct thread *td)
{
        struct mtx *lock;

        THREAD_LOCK_ASSERT(td, MA_OWNED);
        lock = td->td_lock;
        td->td_lock = &blocked_lock;
        mtx_unlock_spin(lock);

        return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
        mtx_assert(new, MA_OWNED);
        MPASS(td->td_lock == &blocked_lock);
        atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

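/*
 * Usage sketch (scheduler-internal, illustrative): callers of the two
 * functions above park a thread's lock pointer on blocked_lock while
 * migrating the thread, then point it at its new lock:
 *
 *	lock = thread_lock_block(td);
 *	... move td, e.g. to another run queue ...
 *	thread_lock_unblock(td, new_lock);
 */
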
void
thread_lock_set(struct thread *td, struct mtx *new)
{
        struct mtx *lock;

        mtx_assert(new, MA_OWNED);
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        lock = td->td_lock;
        td->td_lock = new;
        mtx_unlock_spin(lock);
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
        struct turnstile *ts;

        if (SCHEDULER_STOPPED())
                return;

        if (mtx_recursed(m)) {
                if (--(m->mtx_recurse) == 0)
                        atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->lock_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
                return;
        }

        /*
         * We have to lock the chain before the turnstile so this turnstile
         * can be removed from the hash list if it is empty.
         */
        turnstile_chain_lock(&m->lock_object);
        ts = turnstile_lookup(&m->lock_object);
        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
        MPASS(ts != NULL);
        turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
        _release_lock_quick(m);

        /*
         * This turnstile is now no longer associated with the mutex.  We can
         * unlock the chain lock so a new turnstile may take its place.
         */
        turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
        turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

        if (panicstr != NULL || dumping)
                return;
        switch (what) {
        case MA_OWNED:
        case MA_OWNED | MA_RECURSED:
        case MA_OWNED | MA_NOTRECURSED:
                if (!mtx_owned(m))
                        panic("mutex %s not owned at %s:%d",
                            m->lock_object.lo_name, file, line);
                if (mtx_recursed(m)) {
                        if ((what & MA_NOTRECURSED) != 0)
                                panic("mutex %s recursed at %s:%d",
                                    m->lock_object.lo_name, file, line);
                } else if ((what & MA_RECURSED) != 0) {
                        panic("mutex %s unrecursed at %s:%d",
                            m->lock_object.lo_name, file, line);
                }
                break;
        case MA_NOTOWNED:
                if (mtx_owned(m))
                        panic("mutex %s owned at %s:%d",
                            m->lock_object.lo_name, file, line);
                break;
        default:
                panic("unknown mtx_assert at %s:%d", file, line);
        }
}
#endif

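/*
 * Usage sketch (illustrative, hypothetical names): internal functions
 * commonly document their locking contract by asserting ownership on
 * entry:
 *
 *	static void
 *	foo_update_locked(struct foo_softc *sc)
 *	{
 *
 *		mtx_assert(&sc->sc_mtx, MA_OWNED);
 *		...
 *	}
 */
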
/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
        /*
         * Can't call kernacc() from early init386(), especially when
         * initializing Giant mutex, because some stuff in kernacc()
         * requires Giant itself.
         */
        if (!cold)
                if (!kernacc((caddr_t)m, sizeof(m),
                    VM_PROT_READ | VM_PROT_WRITE))
                        panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
        struct mtx_args *margs = arg;

        mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

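/*
 * Usage sketch (illustrative, hypothetical names): MTX_SYSINIT()
 * arranges for mtx_sysinit() above to run during boot, so file-scope
 * mutexes are initialized without an explicit call:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx_init, &foo_mtx, "foo mutex", MTX_DEF);
 */
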
/*
 * Mutex initialization routine; initialize lock `m' with the type and
 * options contained in `opts' and name `name.'  The optional lock type
 * `type' is used as a general lock category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
        struct lock_class *class;
        int flags;

        MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
            MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
        ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
            ("%s: mtx_lock not aligned for %s: %p", __func__, name,
            &m->mtx_lock));

#ifdef MUTEX_DEBUG
        /* Diagnostic and error correction */
        mtx_validate(m);
#endif

        /* Determine lock class and lock flags. */
        if (opts & MTX_SPIN)
                class = &lock_class_mtx_spin;
        else
                class = &lock_class_mtx_sleep;
        flags = 0;
        if (opts & MTX_QUIET)
                flags |= LO_QUIET;
        if (opts & MTX_RECURSE)
                flags |= LO_RECURSABLE;
        if ((opts & MTX_NOWITNESS) == 0)
                flags |= LO_WITNESS;
        if (opts & MTX_DUPOK)
                flags |= LO_DUPOK;
        if (opts & MTX_NOPROFILE)
                flags |= LO_NOPROFILE;

        /* Initialize mutex. */
        m->mtx_lock = MTX_UNOWNED;
        m->mtx_recurse = 0;

        lock_init(&m->lock_object, class, name, type, flags);
}

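/*
 * Usage sketch (illustrative, hypothetical names): a typical sleep
 * mutex and a recursable spin mutex:
 *
 *	mtx_init(&sc->sc_mtx, "foo softc", NULL, MTX_DEF);
 *	mtx_init(&foo_intr_mtx, "foo intr", NULL, MTX_SPIN | MTX_RECURSE);
 */
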
/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

        if (!mtx_owned(m))
                MPASS(mtx_unowned(m));
        else {
                MPASS((m->mtx_lock & (MTX_RECURSED | MTX_CONTESTED)) == 0);

                /* Perform the non-mtx related part of mtx_unlock_spin(). */
                if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
                        spinlock_exit();
                else
                        curthread->td_locks--;

                lock_profile_release_lock(&m->lock_object);
                /* Tell witness this isn't locked to make it happy. */
                WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
                    __LINE__);
        }

        m->mtx_lock = MTX_DESTROYED;
        lock_destroy(&m->lock_object);
}

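/*
 * Usage sketch (illustrative): destroy a mutex once no other thread can
 * still reference it; per the code above, mtx_destroy() also tolerates
 * being called with the mutex held (non-recursively) by curthread:
 *
 *	mtx_destroy(&sc->sc_mtx);
 */
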
/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

        /* Setup turnstiles so that sleep mutexes work. */
        init_turnstiles();

        /*
         * Initialize mutexes.
         */
        mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
        mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
        blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
        mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
        mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
        mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
        mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(struct lock_object *lock)
{
        struct thread *td;
        struct mtx *m;

        m = (struct mtx *)lock;

        db_printf(" flags: {");
        if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
                db_printf("SPIN");
        else
                db_printf("DEF");
        if (m->lock_object.lo_flags & LO_RECURSABLE)
                db_printf(", RECURSE");
        if (m->lock_object.lo_flags & LO_DUPOK)
                db_printf(", DUPOK");
        db_printf("}\n");
        db_printf(" state: {");
        if (mtx_unowned(m))
                db_printf("UNOWNED");
        else if (mtx_destroyed(m))
                db_printf("DESTROYED");
        else {
                db_printf("OWNED");
                if (m->mtx_lock & MTX_CONTESTED)
                        db_printf(", CONTESTED");
                if (m->mtx_lock & MTX_RECURSED)
                        db_printf(", RECURSED");
        }
        db_printf("}\n");
        if (!mtx_unowned(m) && !mtx_destroyed(m)) {
                td = mtx_owner(m);
                db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_name);
                if (mtx_recursed(m))
                        db_printf(" recursed: %d\n", m->mtx_recurse);
        }
}
#endif