FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_mutex.c
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.2/sys/kern/kern_mutex.c 285759 2015-07-21 17:16:37Z markj $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Return the mutex address when the lock cookie address is provided.
 * This functionality assumes that struct mtx has a member named mtx_lock.
 */
#define mtxlock2mtx(c)  (__containerof(c, struct mtx, mtx_lock))
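
/*
 * For example (a sketch, not part of the KPI): the mtx_init() and
 * mtx_lock() macros in sys/mutex.h hand the address of m->mtx_lock to
 * the _mtx_*() functions below as the cookie `c', and mtxlock2mtx()
 * recovers the enclosing mutex:
 *
 *      struct mtx m;
 *
 *      _mtx_init(&m.mtx_lock, "example", NULL, MTX_DEF);
 *      MPASS(mtxlock2mtx(&m.mtx_lock) == &m);
 */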

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)  ((m)->mtx_lock == MTX_UNOWNED)

#define mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

#define mtx_owner(m)    ((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))

static void     assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void     db_show_mtx(const struct lock_object *lock);
#endif
static void     lock_mtx(struct lock_object *lock, uintptr_t how);
static void     lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int      owner_mtx(const struct lock_object *lock,
                    struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
        .lc_name = "sleep mutex",
        .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
        .lc_assert = assert_mtx,
#ifdef DDB
        .lc_ddb_show = db_show_mtx,
#endif
        .lc_lock = lock_mtx,
        .lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
        .lc_name = "spin mutex",
        .lc_flags = LC_SPINLOCK | LC_RECURSABLE,
        .lc_assert = assert_mtx,
#ifdef DDB
        .lc_ddb_show = db_show_mtx,
#endif
        .lc_lock = lock_spin,
        .lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_mtx,
#endif
};

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;

void
assert_mtx(const struct lock_object *lock, int what)
{

        mtx_assert((const struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

        mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, uintptr_t how)
{

        panic("spin locks can only use msleep_spin");
}

uintptr_t
unlock_mtx(struct lock_object *lock)
{
        struct mtx *m;

        m = (struct mtx *)lock;
        mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
        mtx_unlock(m);
        return (0);
}

uintptr_t
unlock_spin(struct lock_object *lock)
{

        panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
        const struct mtx *m = (const struct mtx *)lock;

        *owner = mtx_owner(m);
        return (mtx_unowned(m) == 0);
}
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
        struct mtx *m;

        if (SCHEDULER_STOPPED())
                return;

        m = mtxlock2mtx(c);

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
            curthread, m->lock_object.lo_name, file, line));
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
            ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
            file, line));
        WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
            LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

        __mtx_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
            file, line);
        curthread->td_locks++;
}

void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
        struct mtx *m;

        if (SCHEDULER_STOPPED())
                return;

        m = mtxlock2mtx(c);

        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
            ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
            file, line));
        WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);

        if (m->mtx_recurse == 0)
                LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m);
        __mtx_unlock(m, curthread, opts, file, line);
        curthread->td_locks--;
}
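
/*
 * A minimal usage sketch (hypothetical lock name): consumers do not call
 * __mtx_lock_flags() directly; they use the mtx_lock()/mtx_unlock()
 * macros from mutex(9), which expand to the functions above in modules
 * and LOCK_DEBUG kernels:
 *
 *      static struct mtx foo_mtx;
 *
 *      mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *      mtx_lock(&foo_mtx);
 *      ... access data protected by foo_mtx ...
 *      mtx_unlock(&foo_mtx);
 */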

void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
        struct mtx *m;

        if (SCHEDULER_STOPPED())
                return;

        m = mtxlock2mtx(c);

        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
            ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
            m->lock_object.lo_name, file, line));
        if (mtx_owned(m))
                KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
                    (opts & MTX_RECURSE) != 0,
                    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
                    m->lock_object.lo_name, file, line));
        opts &= ~MTX_RECURSE;
        WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line, NULL);
        __mtx_lock_spin(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
        struct mtx *m;

        if (SCHEDULER_STOPPED())
                return;

        m = mtxlock2mtx(c);

        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
            ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
            m->lock_object.lo_name, file, line));
        WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);

        __mtx_unlock_spin(m);
}
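
/*
 * A sketch of typical spin mutex use (hypothetical lock name).  Spin
 * mutexes disable interrupts on the local CPU while held, so they are
 * reserved for code that cannot sleep, e.g. interrupt filters and the
 * scheduler:
 *
 *      static struct mtx intr_mtx;
 *
 *      mtx_init(&intr_mtx, "intr state", NULL, MTX_SPIN);
 *      mtx_lock_spin(&intr_mtx);
 *      ... touch state shared with an interrupt handler ...
 *      mtx_unlock_spin(&intr_mtx);
 */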

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
        struct mtx *m;
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        int rval;

        if (SCHEDULER_STOPPED())
                return (1);

        m = mtxlock2mtx(c);

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
            curthread, m->lock_object.lo_name, file, line));
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
            ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
            file, line));

        if (mtx_owned(m) && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
            (opts & MTX_RECURSE) != 0)) {
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                rval = 1;
        } else
                rval = _mtx_obtain_lock(m, (uintptr_t)curthread);
        opts &= ~MTX_RECURSE;

        LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
        if (rval) {
                WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                curthread->td_locks++;
                if (m->mtx_recurse == 0)
                        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
                            m, contested, waittime, file, line);

        }

        return (rval);
}
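
/*
 * A usage sketch (hypothetical lock name): mtx_trylock() returns non-zero
 * on success and 0 if the lock is held by another thread, so it is useful
 * when the caller must avoid blocking, e.g. to dodge a lock-order
 * reversal:
 *
 *      if (mtx_trylock(&foo_mtx)) {
 *              ... fast path, foo_mtx is now held ...
 *              mtx_unlock(&foo_mtx);
 *      } else {
 *              ... defer the work or retry in the right lock order ...
 *      }
 */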

/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
        struct mtx *m;
        struct turnstile *ts;
        uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
        volatile struct thread *owner;
#endif
#ifdef KTR
        int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
        int contested = 0;
        uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
        uint64_t spin_cnt = 0;
        uint64_t sleep_cnt = 0;
        int64_t sleep_time = 0;
        int64_t all_time = 0;
#endif

        if (SCHEDULER_STOPPED())
                return;

        m = mtxlock2mtx(c);

        if (mtx_owned(m)) {
                KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
                    (opts & MTX_RECURSE) != 0,
                    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
                    m->lock_object.lo_name, file, line));
                opts &= ~MTX_RECURSE;
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->lock_object, opts))
                        CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
                return;
        }
        opts &= ~MTX_RECURSE;

#ifdef HWPMC_HOOKS
        PMC_SOFT_CALL( , , lock, failed);
#endif
        lock_profile_obtain_lock_failed(&m->lock_object,
            &contested, &waittime);
        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR4(KTR_LOCK,
                    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
                    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
#ifdef KDTRACE_HOOKS
        all_time -= lockstat_nsecs(&m->lock_object);
#endif

        while (!_mtx_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
                spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
                /*
                 * If the owner is running on another CPU, spin until the
                 * owner stops running or the state of the lock changes.
                 */
                v = m->mtx_lock;
                if (v != MTX_UNOWNED) {
                        owner = (struct thread *)(v & ~MTX_FLAGMASK);
                        if (TD_IS_RUNNING(owner)) {
                                if (LOCK_LOG_TEST(&m->lock_object, 0))
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, m, owner);
                                KTR_STATE1(KTR_SCHED, "thread",
                                    sched_tdname((struct thread *)tid),
                                    "spinning", "lockname:\"%s\"",
                                    m->lock_object.lo_name);
                                while (mtx_owner(m) == owner &&
                                    TD_IS_RUNNING(owner)) {
                                        cpu_spinwait();
#ifdef KDTRACE_HOOKS
                                        spin_cnt++;
#endif
                                }
                                KTR_STATE0(KTR_SCHED, "thread",
                                    sched_tdname((struct thread *)tid),
                                    "running");
                                continue;
                        }
                }
#endif

                ts = turnstile_trywait(&m->lock_object);
                v = m->mtx_lock;

                /*
                 * Check if the lock has been released while spinning for
                 * the turnstile chain lock.
                 */
                if (v == MTX_UNOWNED) {
                        turnstile_cancel(ts);
                        continue;
                }

#ifdef ADAPTIVE_MUTEXES
                /*
                 * The current lock owner might have started executing
                 * on another CPU (or the lock could have changed
                 * owners) while we were waiting on the turnstile
                 * chain lock.  If so, drop the turnstile lock and try
                 * again.
                 */
                owner = (struct thread *)(v & ~MTX_FLAGMASK);
                if (TD_IS_RUNNING(owner)) {
                        turnstile_cancel(ts);
                        continue;
                }
#endif

                /*
                 * If the mutex isn't already contested and a failure occurs
                 * setting the contested bit, the mutex was either released
                 * or the state of the MTX_RECURSED bit changed.
                 */
                if ((v & MTX_CONTESTED) == 0 &&
                    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
                        turnstile_cancel(ts);
                        continue;
                }

                /*
                 * We definitely must sleep for this lock.
                 */
                mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
                if (!cont_logged) {
                        CTR6(KTR_CONTENTION,
                            "contention: %p at %s:%d wants %s, taken by %s:%d",
                            (void *)tid, file, line, m->lock_object.lo_name,
                            WITNESS_FILE(&m->lock_object),
                            WITNESS_LINE(&m->lock_object));
                        cont_logged = 1;
                }
#endif

                /*
                 * Block on the turnstile.
                 */
#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
                turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs(&m->lock_object);
                sleep_cnt++;
#endif
        }
#ifdef KDTRACE_HOOKS
        all_time += lockstat_nsecs(&m->lock_object);
#endif
#ifdef KTR
        if (cont_logged) {
                CTR4(KTR_CONTENTION,
                    "contention end: %s acquired by %p at %s:%d",
                    m->lock_object.lo_name, (void *)tid, file, line);
        }
#endif
        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested,
            waittime, file, line);
#ifdef KDTRACE_HOOKS
        if (sleep_time)
                LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time);

        /*
         * Only record the loops spinning and not sleeping.
         */
        if (spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (all_time - sleep_time));
#endif
}

static void
_mtx_lock_spin_failed(struct mtx *m)
{
        struct thread *td;

        td = mtx_owner(m);

        /* If the mutex is unlocked, try again. */
        if (td == NULL)
                return;

        printf("spin lock %p (%s) held by %p (tid %d) too long\n",
            m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
        witness_display_spinlock(&m->lock_object, td, printf);
#endif
        panic("spin lock held too long");
}

#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
        struct mtx *m;
        int i = 0;
#ifdef LOCK_PROFILING
        int contested = 0;
        uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
        int64_t spin_time = 0;
#endif

        if (SCHEDULER_STOPPED())
                return;

        m = mtxlock2mtx(c);

        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
        KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
            "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

#ifdef HWPMC_HOOKS
        PMC_SOFT_CALL( , , lock, failed);
#endif
        lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
#ifdef KDTRACE_HOOKS
        spin_time -= lockstat_nsecs(&m->lock_object);
#endif
        while (!_mtx_obtain_lock(m, tid)) {

                /* Give interrupts a chance while we spin. */
                spinlock_exit();
                while (m->mtx_lock != MTX_UNOWNED) {
                        if (i++ < 10000000) {
                                cpu_spinwait();
                                continue;
                        }
                        if (i < 60000000 || kdb_active || panicstr != NULL)
                                DELAY(1);
                        else
                                _mtx_lock_spin_failed(m);
                        cpu_spinwait();
                }
                spinlock_enter();
        }
#ifdef KDTRACE_HOOKS
        spin_time += lockstat_nsecs(&m->lock_object);
#endif

        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
        KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
            "running");

        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
            contested, waittime, (file), (line));
#ifdef KDTRACE_HOOKS
        if (spin_time != 0)
                LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, spin_time);
#endif
}
#endif /* SMP */

void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
        struct mtx *m;
        uintptr_t tid;
        int i;
#ifdef LOCK_PROFILING
        int contested = 0;
        uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
        int64_t spin_time = 0;
#endif

        i = 0;
        tid = (uintptr_t)curthread;

        if (SCHEDULER_STOPPED())
                return;

#ifdef KDTRACE_HOOKS
        spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
        for (;;) {
retry:
                spinlock_enter();
                m = td->td_lock;
                KASSERT(m->mtx_lock != MTX_DESTROYED,
                    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
                KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
                    ("thread_lock() of sleep mutex %s @ %s:%d",
                    m->lock_object.lo_name, file, line));
                if (mtx_owned(m))
                        KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
                            ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
                            m->lock_object.lo_name, file, line));
                WITNESS_CHECKORDER(&m->lock_object,
                    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
                while (!_mtx_obtain_lock(m, tid)) {
                        if (m->mtx_lock == tid) {
                                m->mtx_recurse++;
                                break;
                        }
#ifdef HWPMC_HOOKS
                        PMC_SOFT_CALL( , , lock, failed);
#endif
                        lock_profile_obtain_lock_failed(&m->lock_object,
                            &contested, &waittime);
                        /* Give interrupts a chance while we spin. */
                        spinlock_exit();
                        while (m->mtx_lock != MTX_UNOWNED) {
                                if (i++ < 10000000)
                                        cpu_spinwait();
                                else if (i < 60000000 ||
                                    kdb_active || panicstr != NULL)
                                        DELAY(1);
                                else
                                        _mtx_lock_spin_failed(m);
                                cpu_spinwait();
                                if (m != td->td_lock)
                                        goto retry;
                        }
                        spinlock_enter();
                }
                if (m == td->td_lock)
                        break;
                __mtx_unlock_spin(m);   /* does spinlock_exit() */
        }
#ifdef KDTRACE_HOOKS
        spin_time += lockstat_nsecs(&m->lock_object);
#endif
        if (m->mtx_recurse == 0)
                LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
                    m, contested, waittime, (file), (line));
        LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
        LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_time);
}

struct mtx *
thread_lock_block(struct thread *td)
{
        struct mtx *lock;

        THREAD_LOCK_ASSERT(td, MA_OWNED);
        lock = td->td_lock;
        td->td_lock = &blocked_lock;
        mtx_unlock_spin(lock);

        return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
        mtx_assert(new, MA_OWNED);
        MPASS(td->td_lock == &blocked_lock);
        atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
        struct mtx *lock;

        mtx_assert(new, MA_OWNED);
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        lock = td->td_lock;
        td->td_lock = new;
        mtx_unlock_spin(lock);
}
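
/*
 * A sketch of how a scheduler migrates a thread using the helpers above
 * (simplified; the new lock name is hypothetical).  While the thread is
 * between queues, td_lock points at blocked_lock, which can never be
 * acquired, so any thread_lock(td) caller spins until the hand-off:
 *
 *      struct mtx *old;
 *
 *      old = thread_lock_block(td);
 *      ... move td to another run queue ...
 *      thread_lock_unblock(td, &new_runq_lock);
 */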

/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
{
        struct mtx *m;
        struct turnstile *ts;

        if (SCHEDULER_STOPPED())
                return;

        m = mtxlock2mtx(c);

        if (mtx_recursed(m)) {
                if (--(m->mtx_recurse) == 0)
                        atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->lock_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
                return;
        }

        /*
         * We have to lock the chain before the turnstile so this turnstile
         * can be removed from the hash list if it is empty.
         */
        turnstile_chain_lock(&m->lock_object);
        ts = turnstile_lookup(&m->lock_object);
        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
        MPASS(ts != NULL);
        turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
        _mtx_release_lock_quick(m);

        /*
         * This turnstile is now no longer associated with the mutex.  We can
         * unlock the chain lock so a new turnstile may take its place.
         */
        turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
        turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
        const struct mtx *m;

        if (panicstr != NULL || dumping)
                return;

        m = mtxlock2mtx(c);

        switch (what) {
        case MA_OWNED:
        case MA_OWNED | MA_RECURSED:
        case MA_OWNED | MA_NOTRECURSED:
                if (!mtx_owned(m))
                        panic("mutex %s not owned at %s:%d",
                            m->lock_object.lo_name, file, line);
                if (mtx_recursed(m)) {
                        if ((what & MA_NOTRECURSED) != 0)
                                panic("mutex %s recursed at %s:%d",
                                    m->lock_object.lo_name, file, line);
                } else if ((what & MA_RECURSED) != 0) {
                        panic("mutex %s unrecursed at %s:%d",
                            m->lock_object.lo_name, file, line);
                }
                break;
        case MA_NOTOWNED:
                if (mtx_owned(m))
                        panic("mutex %s owned at %s:%d",
                            m->lock_object.lo_name, file, line);
                break;
        default:
                panic("unknown mtx_assert at %s:%d", file, line);
        }
}
#endif
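
/*
 * A usage sketch (hypothetical lock name): mtx_assert() reaches
 * __mtx_assert() only when the kernel provides INVARIANT_SUPPORT and the
 * caller is built with INVARIANTS, so assertions cost nothing in
 * production builds:
 *
 *      mtx_assert(&foo_mtx, MA_OWNED);
 *      ... code that requires foo_mtx to be held by curthread ...
 */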

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void    mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can re-enable this check.
 */
#ifdef notyet
        /*
         * Can't call kernacc() from early init386(), especially when
         * initializing Giant mutex, because some stuff in kernacc()
         * requires Giant itself.
         */
        if (!cold)
                if (!kernacc((caddr_t)m, sizeof(*m),
                    VM_PROT_READ | VM_PROT_WRITE))
                        panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
        struct mtx_args *margs = arg;

        mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
            margs->ma_opts);
}
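
/*
 * For example (a sketch with hypothetical names), a subsystem can have a
 * mutex initialized automatically at boot via the MTX_SYSINIT() macro,
 * which registers a SYSINIT that calls mtx_sysinit() above:
 *
 *      static struct mtx foo_mtx;
 *      MTX_SYSINIT(foo_mtx, &foo_mtx, "foo global lock", MTX_DEF);
 */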

/*
 * Mutex initialization routine; initialize lock `m' with name `name.'
 * The lock class (spin or sleep) and the lock flags are taken from
 * `opts'.  The optional lock type `type' is used as a general lock
 * category name for use with witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
        struct mtx *m;
        struct lock_class *class;
        int flags;

        m = mtxlock2mtx(c);

        MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
            MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
        ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
            ("%s: mtx_lock not aligned for %s: %p", __func__, name,
            &m->mtx_lock));

#ifdef MUTEX_DEBUG
        /* Diagnostic and error correction */
        mtx_validate(m);
#endif

        /* Determine lock class and lock flags. */
        if (opts & MTX_SPIN)
                class = &lock_class_mtx_spin;
        else
                class = &lock_class_mtx_sleep;
        flags = 0;
        if (opts & MTX_QUIET)
                flags |= LO_QUIET;
        if (opts & MTX_RECURSE)
                flags |= LO_RECURSABLE;
        if ((opts & MTX_NOWITNESS) == 0)
                flags |= LO_WITNESS;
        if (opts & MTX_DUPOK)
                flags |= LO_DUPOK;
        if (opts & MTX_NOPROFILE)
                flags |= LO_NOPROFILE;

        /* Initialize mutex. */
        lock_init(&m->lock_object, class, name, type, flags);

        m->mtx_lock = MTX_UNOWNED;
        m->mtx_recurse = 0;
}
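
/*
 * A minimal lifecycle sketch (hypothetical names): a mutex must be
 * initialized before first use and destroyed before its memory is freed
 * or reused:
 *
 *      struct foo *fp = malloc(sizeof(*fp), M_TEMP, M_WAITOK | M_ZERO);
 *
 *      mtx_init(&fp->foo_mtx, "foo", NULL, MTX_DEF);
 *      ...
 *      mtx_destroy(&fp->foo_mtx);
 *      free(fp, M_TEMP);
 */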

/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a flag
 * here because if the corresponding mtx_init() was called with MTX_QUIET
 * set, then it will already be set in the mutex's flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
        struct mtx *m;

        m = mtxlock2mtx(c);

        if (!mtx_owned(m))
                MPASS(mtx_unowned(m));
        else {
                MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

                /* Perform the non-mtx related part of mtx_unlock_spin(). */
                if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
                        spinlock_exit();
                else
                        curthread->td_locks--;

                lock_profile_release_lock(&m->lock_object);
                /* Tell witness this isn't locked to make it happy. */
                WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
                    __LINE__);
        }

        m->mtx_lock = MTX_DESTROYED;
        lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

        /* Setup turnstiles so that sleep mutexes work. */
        init_turnstiles();

        /*
         * Initialize mutexes.
         */
        mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
        mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
        blocked_lock.mtx_lock = 0xdeadc0de;     /* Always blocked. */
        mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
        mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
        mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
        mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
        struct thread *td;
        const struct mtx *m;

        m = (const struct mtx *)lock;

        db_printf(" flags: {");
        if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
                db_printf("SPIN");
        else
                db_printf("DEF");
        if (m->lock_object.lo_flags & LO_RECURSABLE)
                db_printf(", RECURSE");
        if (m->lock_object.lo_flags & LO_DUPOK)
                db_printf(", DUPOK");
        db_printf("}\n");
        db_printf(" state: {");
        if (mtx_unowned(m))
                db_printf("UNOWNED");
        else if (mtx_destroyed(m))
                db_printf("DESTROYED");
        else {
                db_printf("OWNED");
                if (m->mtx_lock & MTX_CONTESTED)
                        db_printf(", CONTESTED");
                if (m->mtx_lock & MTX_RECURSED)
                        db_printf(", RECURSED");
        }
        db_printf("}\n");
        if (!mtx_unowned(m) && !mtx_destroyed(m)) {
                td = mtx_owner(m);
                db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_name);
                if (mtx_recursed(m))
                        db_printf(" recursed: %d\n", m->mtx_recurse);
        }
}
#endif