sys/kern/kern_mutex.c
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.1/sys/kern/kern_mutex.c 320241 2017-06-22 18:40:34Z markj $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif

/*
 * Return the mutex address when the lock cookie address is provided.
 * This functionality assumes that a struct mtx has a member named mtx_lock.
 */
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))
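
/*
 * Editor's illustration (not part of the original source): the "lock
 * cookie" passed to the functions below is the address of the mtx_lock
 * word embedded in a struct mtx, and __containerof() recovers the
 * enclosing structure from it.  A minimal sketch, assuming a mutex
 * named "example_mtx":
 *
 *	volatile uintptr_t *c = &example_mtx.mtx_lock;
 *	struct mtx *m = mtxlock2mtx(c);
 *	MPASS(m == &example_mtx);
 */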

/*
 * Internal utility macros.
 */
#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)

#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, uintptr_t how);
static void	lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
	.lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
	.lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
};

#ifdef ADAPTIVE_MUTEXES
static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD, NULL, "mtx debugging");

static struct lock_delay_config __read_mostly mtx_delay;

SYSCTL_INT(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay);
#endif

static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin, CTLFLAG_RD, NULL,
    "mtx spin debugging");

static struct lock_delay_config __read_mostly mtx_spin_delay;

SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW,
    &mtx_spin_delay.base, 0, "");
SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW,
    &mtx_spin_delay.max, 0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay);

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;

void
assert_mtx(const struct lock_object *lock, int what)
{

	mtx_assert((const struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

	mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, uintptr_t how)
{

	panic("spin locks can only use msleep_spin");
}

uintptr_t
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}

uintptr_t
unlock_spin(struct lock_object *lock)
{

	panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m;
	uintptr_t x;

	m = (const struct mtx *)lock;
	x = m->mtx_lock;
	*owner = (struct thread *)(x & ~MTX_FLAGMASK);
	return (x != MTX_UNOWNED);
}
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_sleep(m, v, tid, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
		    m, 0, 0, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	TD_LOCKS_INC(curthread);
}

void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

#ifdef LOCK_PROFILING
	__mtx_unlock_sleep(c, opts, file, line);
#else
	__mtx_unlock(m, curthread, opts, file, line);
#endif
	TD_LOCKS_DEC(curthread);
}
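
/*
 * Usage sketch (editorial addition): callers do not invoke the two
 * functions above directly; they use the mtx_lock()/mtx_unlock() macros
 * from sys/mutex.h, which compile to an inline atomic fast path and fall
 * back to these functions on contention.  The mutex name below is
 * hypothetical:
 *
 *	static struct mtx example_mtx;
 *
 *	mtx_init(&example_mtx, "example", NULL, MTX_DEF);
 *	mtx_lock(&example_mtx);
 *	... access data protected by example_mtx; the holder may block ...
 *	mtx_unlock(&example_mtx);
 *	mtx_destroy(&example_mtx);
 */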

void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
	__mtx_lock_spin(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

int
__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((opts & MTX_RECURSE) == 0,
	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
		return (1);
	}
	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
	return (0);
}

void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}
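
/*
 * Usage sketch (editorial addition): spin mutexes disable interrupts on
 * the local CPU via spinlock_enter() in the inline path, so they may be
 * shared with interrupt handlers; code holding them must never sleep.
 * Hypothetical example:
 *
 *	static struct mtx example_intr_mtx;
 *
 *	mtx_init(&example_intr_mtx, "example intr", NULL, MTX_SPIN);
 *	mtx_lock_spin(&example_intr_mtx);
 *	... touch state shared with an interrupt handler ...
 *	mtx_unlock_spin(&example_intr_mtx);
 */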

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	struct thread *td;
	uintptr_t tid, v;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	rval = 1;
	recursed = false;
	v = MTX_UNOWNED;
	for (;;) {
		if (_mtx_obtain_lock_fetch(m, &v, tid))
			break;
		if (v == MTX_UNOWNED)
			continue;
		if (v == tid &&
		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0)) {
			m->mtx_recurse++;
			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);
	}

	return (rval);
}
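
/*
 * Usage sketch (editorial addition): mtx_trylock() returns non-zero when
 * the lock was obtained, which lets callers back off instead of blocking,
 * e.g. when acquiring locks in an order the witness code would flag.
 * Hypothetical example:
 *
 *	if (mtx_trylock(&example_mtx)) {
 *		... lock held, do the work ...
 *		mtx_unlock(&example_mtx);
 *	} else {
 *		... defer the work or retry later ...
 *	}
 */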

/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
#if LOCK_DEBUG > 0
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts,
    const char *file, int line)
#else
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid)
#endif
{
	struct mtx *m;
	struct turnstile *ts;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_MUTEXES)
	lock_delay_arg_init(&lda, &mtx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	m = mtxlock2mtx(c);
	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(lv_mtx_owner(v) == (struct thread *)tid)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
#if LOCK_DEBUG > 0
		opts &= ~MTX_RECURSE;
#endif
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}
#if LOCK_DEBUG > 0
	opts &= ~MTX_RECURSE;
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		all_time -= lockstat_nsecs(&m->lock_object);
#endif

	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&m->lock_object, 0))
				CTR3(KTR_LOCK,
				    "%s: spinning on %p held by %p",
				    __func__, m, owner);
			KTR_STATE1(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "spinning", "lockname:\"%s\"",
			    m->lock_object.lo_name);
			do {
				lock_delay(&lda);
				v = MTX_READ_VALUE(m);
				owner = lv_mtx_owner(v);
			} while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid),
			    "running");
			continue;
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = MTX_READ_VALUE(m);

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			v = MTX_READ_VALUE(m);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&m->lock_object);
		sleep_cnt++;
#endif
		v = MTX_READ_VALUE(m);
	}
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
#endif
}
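
/*
 * Editor's summary (not part of the original source): stripped of the
 * profiling and tracing hooks, the contested-acquire loop above is:
 *
 *	for (;;) {
 *		if unowned: try to atomically swing UNOWNED -> tid; done
 *		if the owner is running on another CPU: spin, re-read, retry
 *		take the turnstile chain lock and re-read the lock word
 *		if the lock was released or the owner started running:
 *			cancel the turnstile and retry
 *		set MTX_CONTESTED (retry if that fails), then block on
 *		the turnstile until woken by the releasing thread
 *	}
 */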

static void
_mtx_lock_spin_failed(struct mtx *m)
{
	struct thread *td;

	td = mtx_owner(m);

	/* If the mutex is unlocked, try again. */
	if (td == NULL)
		return;

	printf( "spin lock %p (%s) held by %p (tid %d) too long\n",
	    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
	witness_display_spinlock(&m->lock_object, td, printf);
#endif
	panic("spin lock held too long");
}

#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
    int opts, const char *file, int line)
{
	struct mtx *m;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof;
#endif

	if (SCHEDULER_STOPPED())
		return;

	lock_delay_arg_init(&lda, &mtx_spin_delay);
	m = mtxlock2mtx(c);

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v == tid)) {
		m->mtx_recurse++;
		return;
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		spin_time -= lockstat_nsecs(&m->lock_object);
#endif
	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		do {
			if (lda.spin_cnt < 10000000) {
				lock_delay(&lda);
			} else {
				lda.spin_cnt++;
				if (lda.spin_cnt < 60000000 || kdb_active ||
				    panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
			}
			v = MTX_READ_VALUE(m);
		} while (v != MTX_UNOWNED);
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
	    contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (spin_time != 0)
		LOCKSTAT_RECORD1(spin__spin, m, spin_time);
#endif
}
#endif /* SMP */

void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 1;
#endif

	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED()) {
		/*
		 * Ensure that spinlock sections are balanced even when the
		 * scheduler is stopped, since we may otherwise inadvertently
		 * re-enable interrupts while dumping core.
		 */
		spinlock_enter();
		return;
	}

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
	if (__predict_false(doing_lockprof))
		spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	for (;;) {
retry:
		v = MTX_UNOWNED;
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		for (;;) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			if (v == MTX_UNOWNED)
				continue;
			if (v == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			do {
				if (lda.spin_cnt < 10000000) {
					lock_delay(&lda);
				} else {
					lda.spin_cnt++;
					if (lda.spin_cnt < 60000000 ||
					    kdb_active || panicstr != NULL)
						DELAY(1);
					else
						_mtx_lock_spin_failed(m);
					cpu_spinwait();
				}
				if (m != td->td_lock)
					goto retry;
				v = MTX_READ_VALUE(m);
			} while (v != MTX_UNOWNED);
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
	}
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
		    contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (spin_time != 0)
		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
#endif
}

struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
	mtx_assert(new, MA_OWNED);
	MPASS(td->td_lock == &blocked_lock);
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = new;
	mtx_unlock_spin(lock);
}
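
/*
 * Usage sketch (editorial addition): the scheduler hands a thread's lock
 * pointer off with the pair above while migrating the thread between
 * containers.  Schematically (hypothetical caller):
 *
 *	struct mtx *lock;
 *
 *	lock = thread_lock_block(td);	-- td->td_lock is now &blocked_lock
 *	... move td to another run queue; pick its new container lock ...
 *	thread_lock_unblock(td, new);	-- publish td's new lock pointer
 */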

/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed, contested (i.e. we
 * need to wake up a blocked thread) or a lockstat probe is active.
 */
#if LOCK_DEBUG > 0
void
__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
#else
void
__mtx_unlock_sleep(volatile uintptr_t *c)
#endif
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid, v;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);
	v = MTX_READ_VALUE(m);

	if (v & MTX_RECURSED) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
	if (v == tid && _mtx_release_lock(m, tid))
		return;

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	MPASS(ts != NULL);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	_mtx_release_lock_quick(m);

	/*
	 * This turnstile is now no longer associated with the mutex.  We can
	 * unlock the chain lock so a new turnstile may take its place.
	 */
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct mtx *m;

	if (panicstr != NULL || dumping || SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
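
/*
 * Usage sketch (editorial addition): under INVARIANTS, subsystems call
 * mtx_assert() to document and enforce their locking protocol at entry
 * to functions that require a lock.  Hypothetical example:
 *
 *	static void
 *	example_update(struct example_softc *sc)
 *	{
 *
 *		mtx_assert(&sc->sc_mtx, MA_OWNED);
 *		... modify fields protected by sc_mtx ...
 *	}
 */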

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
	    margs->ma_opts);
}
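
/*
 * Usage sketch (editorial addition): mtx_sysinit() is normally reached
 * through the MTX_SYSINIT() macro, which arranges for the mutex to be
 * initialized during boot via SYSINIT(9).  Hypothetical example:
 *
 *	static struct mtx example_mtx;
 *	MTX_SYSINIT(example_mtx, &example_mtx, "example", MTX_DEF);
 */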

/*
 * Mutex initialization routine; initialize lock `m' with the type and
 * options contained in `opts' and name `name.'  The optional
 * lock type `type' is used as a general lock category name for use with
 * witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
	struct mtx *m;
	struct lock_class *class;
	int flags;

	m = mtxlock2mtx(c);

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (opts & MTX_NEW)
		flags |= LO_NEW;

	/* Initialize mutex. */
	lock_init(&m->lock_object, class, name, type, flags);

	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
}

/*
 * Remove lock `m' from all_mtx queue.  We don't allow MTX_QUIET to be
 * passed in as a flag here because if the corresponding mtx_init() was
 * called with MTX_QUIET set, then it will already be set in the mutex's
 * flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
			spinlock_exit();
		else
			TD_LOCKS_DEC(curthread);

		lock_profile_release_lock(&m->lock_object);
		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
	mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
	mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
	mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif