FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_mutex.c
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.1/sys/kern/kern_mutex.c 236238 2012-05-29 14:50:21Z fabient $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

#define	mtx_owner(m)	((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))

static void	assert_mtx(struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, int how);
static void	lock_spin(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(struct lock_object *lock, struct thread **owner);
#endif
static int	unlock_mtx(struct lock_object *lock);
static int	unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;

void
assert_mtx(struct lock_object *lock, int what)
{

	mtx_assert((struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, int how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, int how)
{

	panic("spin locks can only use msleep_spin");
}

int
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

int
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(struct lock_object *lock, struct thread **owner)
{
	struct mtx *m = (struct mtx *)lock;

	*owner = mtx_owner(m);
	return (mtx_unowned(m) == 0);
}
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	__mtx_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	curthread->td_locks--;
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m);
	__mtx_unlock(m, curthread, opts, file, line);
}
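
/*
 * Sketch of how the public API reaches the two functions above (assuming
 * the macro definitions in sys/mutex.h; the exact expansion depends on
 * kernel options such as LOCK_DEBUG, so this is illustrative only):
 *
 *	mtx_lock(m)   -> mtx_lock_flags((m), 0)
 *	              -> _mtx_lock_flags((m), 0, LOCK_FILE, LOCK_LINE)
 *	mtx_unlock(m) -> mtx_unlock_flags((m), 0)
 *	              -> _mtx_unlock_flags((m), 0, LOCK_FILE, LOCK_LINE)
 */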

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
	__mtx_lock_spin(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}

/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _mtx_obtain_lock(m, (uintptr_t)curthread);

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
			    m, contested, waittime, file, line);
	}

	return (rval);
}
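
/*
 * Usage sketch (the foo_* names are hypothetical): mtx_trylock() returns
 * non-zero when the lock was obtained, so a caller that must not block
 * typically does the work under the lock on success and defers it
 * otherwise:
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		foo_process();		-- hypothetical helper
 *		mtx_unlock(&foo_mtx);
 *	} else
 *		foo_defer();		-- hypothetical helper
 */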

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	struct turnstile *ts;
	uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	if (mtx_owned(m)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		v = m->mtx_lock;
		if (v != MTX_UNOWNED) {
			owner = (struct thread *)(v & ~MTX_FLAGMASK);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&m->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, m, owner);
				while (mtx_owner(m) == owner &&
				    TD_IS_RUNNING(owner)) {
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
				}
				continue;
			}
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs();
#endif
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs();
		sleep_cnt++;
#endif
	}
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (spin_cnt - sleep_cnt));
#endif
}

static void
_mtx_lock_spin_failed(struct mtx *m)
{
	struct thread *td;

	td = mtx_owner(m);

	/* If the mutex is unlocked, try again. */
	if (td == NULL)
		return;

	printf("spin lock %p (%s) held by %p (tid %d) too long\n",
	    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
	witness_display_spinlock(&m->lock_object, td, printf);
#endif
	panic("spin lock held too long");
}

#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	int i = 0;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
	while (!_mtx_obtain_lock(m, tid)) {

		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000 || kdb_active || panicstr != NULL)
				DELAY(1);
			else
				_mtx_lock_spin_failed(m);
			cpu_spinwait();
		}
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
	    contested, waittime, (file), (line));
	LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, i);
}
#endif /* SMP */
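
/*
 * Usage sketch (hypothetical names): spin mutexes protect short critical
 * sections that may be entered from interrupt context, where sleeping is
 * not permitted.  The lock/unlock macros bracket the section with
 * spinlock_enter()/spinlock_exit(), so interrupts stay disabled on the
 * local CPU while the lock is held:
 *
 *	static struct mtx foo_intr_mtx;
 *	mtx_init(&foo_intr_mtx, "foo intr", NULL, MTX_SPIN);
 *
 *	mtx_lock_spin(&foo_intr_mtx);
 *	... brief work only; no sleeping, no sleep mutexes ...
 *	mtx_unlock_spin(&foo_intr_mtx);
 */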

void
_thread_lock_flags(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid;
	int i;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
#endif

	i = 0;
	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED())
		return;

	for (;;) {
retry:
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
			    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
			spin_cnt++;
#endif
			if (m->mtx_lock == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			while (m->mtx_lock != MTX_UNOWNED) {
				if (i++ < 10000000)
					cpu_spinwait();
				else if (i < 60000000 ||
				    kdb_active || panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
				if (m != td->td_lock)
					goto retry;
			}
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
	}
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
		    m, contested, waittime, (file), (line));
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_cnt);
}

struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}
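
/*
 * Sketch of the handoff the two helpers above implement (a simplified,
 * assumed view of how the scheduler/sleepqueue code uses them): while a
 * thread is being moved between containers, its td_lock is parked on
 * blocked_lock, whose lock word is never MTX_UNOWNED, so concurrent
 * thread_lock() callers keep spinning until the new container lock is
 * published:
 *
 *	lock = thread_lock_block(td);	-- td_lock now points at blocked_lock
 *	... requeue td under the container protected by `new' ...
 *	thread_lock_unblock(td, new);	-- td_lock now points at `new'
 */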

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = new;
	mtx_unlock_spin(lock);
}

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
	struct turnstile *ts;

	if (SCHEDULER_STOPPED())
		return;

	if (mtx_recursed(m)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	MPASS(ts != NULL);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_mtx_release_lock_quick(m);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert().
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

	if (panicstr != NULL || dumping)
		return;
	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
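
/*
 * Usage sketch (hypothetical names): functions whose callers must hold a
 * lock typically assert that on entry, so kernels built with INVARIANTS
 * catch misuse immediately:
 *
 *	static void
 *	foo_modify(struct foo *fp)
 *	{
 *
 *		mtx_assert(&fp->foo_mtx, MA_OWNED);
 *		fp->foo_count++;
 *	}
 */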

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void	mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
	/*
	 * Can't call kernacc() from early init386(), especially when
	 * initializing Giant mutex, because some stuff in kernacc()
	 * requires Giant itself.
	 */
	if (!cold)
		if (!kernacc((caddr_t)m, sizeof(m),
		    VM_PROT_READ | VM_PROT_WRITE))
			panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}
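
/*
 * Usage sketch (hypothetical names): MTX_SYSINIT() registers mtx_sysinit()
 * to run during boot, so a subsystem gets its mutex initialized without an
 * explicit call from its own startup path:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo", MTX_DEF);
 */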

/*
 * Mutex initialization routine; initialize lock `m' of the type (spin or
 * sleep) and with the options contained in `opts', under name `name.'
 * The optional lock type `type' is used as a general lock category name
 * for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
	struct lock_class *class;
	int flags;

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

#ifdef MUTEX_DEBUG
	/* Diagnostic and error correction */
	mtx_validate(m);
#endif

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;

	/* Initialize mutex. */
	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;

	lock_init(&m->lock_object, class, name, type, flags);
}
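
/*
 * Usage sketch (hypothetical names): the typical lifecycle of a sleep
 * mutex protecting a subsystem's data:
 *
 *	static struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	mtx_lock(&foo_mtx);
 *	... access data protected by foo_mtx ...
 *	mtx_unlock(&foo_mtx);
 *	mtx_destroy(&foo_mtx);
 */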

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
mtx_destroy(struct mtx *m)
{

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			curthread->td_locks--;

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(struct lock_object *lock)
{
	struct thread *td;
	struct mtx *m;

	m = (struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif