FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_mutex.c
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define ADAPTIVE_MUTEXES
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif
/*
 * Return the mutex address when the lock cookie address is provided.
 * This functionality assumes that struct mtx has a member named mtx_lock.
 */
#define mtxlock2mtx(c) (__containerof(c, struct mtx, mtx_lock))

/*
 * Internal utility macros.
 */
#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)

#define mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)

#define mtx_owner(m) ((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))
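
/*
 * A note on the lock word, as an aid to reading the code below: for an
 * owned sleep mutex, mtx_lock holds the owning thread pointer OR'd with
 * the MTX_RECURSED and MTX_CONTESTED flag bits (see sys/sys/mutex.h).
 * Thread structures are aligned well beyond those low bits, so the owner
 * is recovered by masking the flags off, as mtx_owner() does above.  A
 * minimal decoding sketch:
 *
 *    uintptr_t v = m->mtx_lock;
 *    struct thread *owner = (struct thread *)(v & ~MTX_FLAGMASK);
 *    int contested = (v & MTX_CONTESTED) != 0;
 *    int recursed = (v & MTX_RECURSED) != 0;
 */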

static void assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void db_show_mtx(const struct lock_object *lock);
#endif
static void lock_mtx(struct lock_object *lock, uintptr_t how);
static void lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int owner_mtx(const struct lock_object *lock,
            struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
        .lc_name = "sleep mutex",
        .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
        .lc_assert = assert_mtx,
#ifdef DDB
        .lc_ddb_show = db_show_mtx,
#endif
        .lc_lock = lock_mtx,
        .lc_unlock = unlock_mtx,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_mtx,
#endif
};
struct lock_class lock_class_mtx_spin = {
        .lc_name = "spin mutex",
        .lc_flags = LC_SPINLOCK | LC_RECURSABLE,
        .lc_assert = assert_mtx,
#ifdef DDB
        .lc_ddb_show = db_show_mtx,
#endif
        .lc_lock = lock_spin,
        .lc_unlock = unlock_spin,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_mtx,
#endif
};

#ifdef ADAPTIVE_MUTEXES
static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD, NULL, "mtx debugging");

static struct lock_delay_config mtx_delay = {
        .initial = 1000,
        .step = 500,
        .min = 100,
        .max = 5000,
};

SYSCTL_INT(_debug_mtx, OID_AUTO, delay_initial, CTLFLAG_RW, &mtx_delay.initial,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_step, CTLFLAG_RW, &mtx_delay.step,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_min, CTLFLAG_RW, &mtx_delay.min,
    0, "");
SYSCTL_INT(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
    0, "");

static void
mtx_delay_sysinit(void *dummy)
{

        mtx_delay.initial = mp_ncpus * 25;
        mtx_delay.step = (mp_ncpus * 25) / 2;
        mtx_delay.min = mp_ncpus * 5;
        mtx_delay.max = mp_ncpus * 25 * 10;
}
LOCK_DELAY_SYSINIT(mtx_delay_sysinit);
#endif
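
/*
 * The tunables above feed the lock_delay() backoff used by the adaptive
 * spin loop in __mtx_lock_sleep().  In rough outline (a sketch of the
 * intent, not a verbatim copy of lock_delay() itself): each failed pass
 * spins for a bounded, growing, jittered number of cpu_spinwait()
 * iterations,
 *
 *    delay = (delay == 0) ? initial : imin(delay + step, max);
 *    spins = imax(jitter % delay, min);    // jitter: e.g. a cycle count
 *
 * so that contending CPUs back off rather than hammering the lock's
 * cache line.  mtx_delay_sysinit() then rescales the compiled-in
 * defaults by mp_ncpus once the CPU count is known.
 */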

/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx Giant;

void
assert_mtx(const struct lock_object *lock, int what)
{

        mtx_assert((const struct mtx *)lock, what);
}

void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

        mtx_lock((struct mtx *)lock);
}

void
lock_spin(struct lock_object *lock, uintptr_t how)
{

        panic("spin locks can only use msleep_spin");
}

uintptr_t
unlock_mtx(struct lock_object *lock)
{
        struct mtx *m;

        m = (struct mtx *)lock;
        mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
        mtx_unlock(m);
        return (0);
}

uintptr_t
unlock_spin(struct lock_object *lock)
{

        panic("spin locks can only use msleep_spin");
}

#ifdef KDTRACE_HOOKS
int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
        const struct mtx *m = (const struct mtx *)lock;

        *owner = mtx_owner(m);
        return (mtx_unowned(m) == 0);
}
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
        struct mtx *m;

        if (SCHEDULER_STOPPED())
                return;

        m = mtxlock2mtx(c);

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
            curthread, m->lock_object.lo_name, file, line));
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
            ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
            file, line));
        WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
            LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

        __mtx_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
            file, line);
        curthread->td_locks++;
}

void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
        struct mtx *m;

        if (SCHEDULER_STOPPED())
                return;

        m = mtxlock2mtx(c);

        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
            ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
            file, line));
        WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);

        if (m->mtx_recurse == 0)
                LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m);
        __mtx_unlock(m, curthread, opts, file, line);
        curthread->td_locks--;
}
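
/*
 * For reference, typical consumer-side use of the sleep mutex API whose
 * out-of-line versions appear above; the mtx_lock()/mtx_unlock() macros
 * are documented in mutex(9).  The "foo" names are hypothetical:
 *
 *    static struct mtx foo_mtx;
 *    static int foo_count;
 *
 *    mtx_init(&foo_mtx, "foo counter", NULL, MTX_DEF);
 *
 *    void
 *    foo_bump(void)
 *    {
 *            mtx_lock(&foo_mtx);    // may sleep waiting for the lock
 *            foo_count++;
 *            mtx_unlock(&foo_mtx);
 *    }
 */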

void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
        struct mtx *m;

        if (SCHEDULER_STOPPED())
                return;

        m = mtxlock2mtx(c);

        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
            ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
            m->lock_object.lo_name, file, line));
        if (mtx_owned(m))
                KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
                    (opts & MTX_RECURSE) != 0,
            ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
                    m->lock_object.lo_name, file, line));
        opts &= ~MTX_RECURSE;
        WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line, NULL);
        __mtx_lock_spin(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}

int
__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
        struct mtx *m;

        if (SCHEDULER_STOPPED())
                return (1);

        m = mtxlock2mtx(c);

        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
            ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
            m->lock_object.lo_name, file, line));
        KASSERT((opts & MTX_RECURSE) == 0,
            ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
            m->lock_object.lo_name, file, line));
        if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
                LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
                WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
                return (1);
        }
        LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
        return (0);
}

void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
        struct mtx *m;

        if (SCHEDULER_STOPPED())
                return;

        m = mtxlock2mtx(c);

        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
            ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
            m->lock_object.lo_name, file, line));
        WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);

        __mtx_unlock_spin(m);
}
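
/*
 * Spin mutexes serve code that must not sleep, such as interrupt
 * filters and scheduler internals; mtx_lock_spin() also disables
 * interrupts on the local CPU via spinlock_enter().  A hypothetical
 * sketch (the "foo" names are illustrative):
 *
 *    static struct mtx foo_intr_mtx;
 *
 *    mtx_init(&foo_intr_mtx, "foo intr", NULL, MTX_SPIN);
 *
 *    static int
 *    foo_intr_filter(void *arg)
 *    {
 *            mtx_lock_spin(&foo_intr_mtx);
 *            // ... touch state shared with other contexts ...
 *            mtx_unlock_spin(&foo_intr_mtx);
 *            return (FILTER_HANDLED);
 *    }
 */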

/*
 * The important part of mtx_trylock{,_flags}().
 *
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
        struct mtx *m;
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        int rval;

        if (SCHEDULER_STOPPED())
                return (1);

        m = mtxlock2mtx(c);

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
            curthread, m->lock_object.lo_name, file, line));
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
            ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
            file, line));

        if (mtx_owned(m) && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
            (opts & MTX_RECURSE) != 0)) {
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                rval = 1;
        } else
                rval = _mtx_obtain_lock(m, (uintptr_t)curthread);
        opts &= ~MTX_RECURSE;

        LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
        if (rval) {
                WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                curthread->td_locks++;
                if (m->mtx_recurse == 0)
                        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
                            m, contested, waittime, file, line);
        }

        return (rval);
}
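
/*
 * mtx_trylock() is commonly used to avoid a lock order reversal when a
 * lock must be taken out of order relative to one already held: attempt
 * the out-of-order acquisition, and on failure back off to the
 * documented order.  A sketch with hypothetical locks a and b, where
 * the established order is a before b:
 *
 *    mtx_lock(&b);
 *    if (!mtx_trylock(&a)) {
 *            mtx_unlock(&b);        // drop b to respect the a -> b order
 *            mtx_lock(&a);
 *            mtx_lock(&b);
 *            // ... revalidate any state derived while b was dropped ...
 *    }
 */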

/*
 * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
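/*
 * In outline, the loop below (1) retries the atomic acquisition, (2)
 * under ADAPTIVE_MUTEXES spins, with lock_delay() backoff, while the
 * owner is running on another CPU, since the lock is then likely to be
 * released soon, and (3) otherwise acquires a turnstile, revalidates
 * the lock state, sets MTX_CONTESTED so the owner knows to wake us on
 * release, and blocks on the turnstile's exclusive queue.
 */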
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
        struct mtx *m;
        struct turnstile *ts;
        uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
        volatile struct thread *owner;
#endif
#ifdef KTR
        int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
        int contested = 0;
        uint64_t waittime = 0;
#endif
#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
        struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
        u_int sleep_cnt = 0;
        int64_t sleep_time = 0;
        int64_t all_time = 0;
#endif

        if (SCHEDULER_STOPPED())
                return;

#if defined(ADAPTIVE_MUTEXES)
        lock_delay_arg_init(&lda, &mtx_delay);
#elif defined(KDTRACE_HOOKS)
        lock_delay_arg_init(&lda, NULL);
#endif
        m = mtxlock2mtx(c);

        if (mtx_owned(m)) {
                KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
                    (opts & MTX_RECURSE) != 0,
            ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
                    m->lock_object.lo_name, file, line));
                opts &= ~MTX_RECURSE;
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->lock_object, opts))
                        CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
                return;
        }
        opts &= ~MTX_RECURSE;

#ifdef HWPMC_HOOKS
        PMC_SOFT_CALL( , , lock, failed);
#endif
        lock_profile_obtain_lock_failed(&m->lock_object,
            &contested, &waittime);
        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR4(KTR_LOCK,
                    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
                    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
#ifdef KDTRACE_HOOKS
        all_time -= lockstat_nsecs(&m->lock_object);
#endif

        for (;;) {
                if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
                        break;
#ifdef KDTRACE_HOOKS
                lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
                /*
                 * If the owner is running on another CPU, spin until the
                 * owner stops running or the state of the lock changes.
                 */
                v = m->mtx_lock;
                if (v != MTX_UNOWNED) {
                        owner = (struct thread *)(v & ~MTX_FLAGMASK);
                        if (TD_IS_RUNNING(owner)) {
                                if (LOCK_LOG_TEST(&m->lock_object, 0))
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, m, owner);
                                KTR_STATE1(KTR_SCHED, "thread",
                                    sched_tdname((struct thread *)tid),
                                    "spinning", "lockname:\"%s\"",
                                    m->lock_object.lo_name);
                                while (mtx_owner(m) == owner &&
                                    TD_IS_RUNNING(owner))
                                        lock_delay(&lda);
                                KTR_STATE0(KTR_SCHED, "thread",
                                    sched_tdname((struct thread *)tid),
                                    "running");
                                continue;
                        }
                }
#endif

                ts = turnstile_trywait(&m->lock_object);
                v = m->mtx_lock;

                /*
                 * Check if the lock has been released while spinning for
                 * the turnstile chain lock.
                 */
                if (v == MTX_UNOWNED) {
                        turnstile_cancel(ts);
                        continue;
                }

#ifdef ADAPTIVE_MUTEXES
                /*
                 * The current lock owner might have started executing
                 * on another CPU (or the lock could have changed
                 * owners) while we were waiting on the turnstile
                 * chain lock.  If so, drop the turnstile lock and try
                 * again.
                 */
                owner = (struct thread *)(v & ~MTX_FLAGMASK);
                if (TD_IS_RUNNING(owner)) {
                        turnstile_cancel(ts);
                        continue;
                }
#endif

                /*
                 * If the mutex isn't already contested and a failure occurs
                 * setting the contested bit, the mutex was either released
                 * or the state of the MTX_RECURSED bit changed.
                 */
                if ((v & MTX_CONTESTED) == 0 &&
                    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
                        turnstile_cancel(ts);
                        continue;
                }

                /*
                 * We definitely must sleep for this lock.
                 */
                mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
                if (!cont_logged) {
                        CTR6(KTR_CONTENTION,
                            "contention: %p at %s:%d wants %s, taken by %s:%d",
                            (void *)tid, file, line, m->lock_object.lo_name,
                            WITNESS_FILE(&m->lock_object),
                            WITNESS_LINE(&m->lock_object));
                        cont_logged = 1;
                }
#endif

                /*
                 * Block on the turnstile.
                 */
#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
                turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs(&m->lock_object);
                sleep_cnt++;
#endif
        }
#ifdef KDTRACE_HOOKS
        all_time += lockstat_nsecs(&m->lock_object);
#endif
#ifdef KTR
        if (cont_logged) {
                CTR4(KTR_CONTENTION,
                    "contention end: %s acquired by %p at %s:%d",
                    m->lock_object.lo_name, (void *)tid, file, line);
        }
#endif
        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested,
            waittime, file, line);
#ifdef KDTRACE_HOOKS
        if (sleep_time)
                LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time);

        /*
         * Only record the loops spinning and not sleeping.
         */
        if (lda.spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (all_time - sleep_time));
#endif
}

static void
_mtx_lock_spin_failed(struct mtx *m)
{
        struct thread *td;

        td = mtx_owner(m);

        /* If the mutex is unlocked, try again. */
        if (td == NULL)
                return;

        printf("spin lock %p (%s) held by %p (tid %d) too long\n",
            m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
        witness_display_spinlock(&m->lock_object, td, printf);
#endif
        panic("spin lock held too long");
}

#ifdef SMP
/*
 * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
    const char *file, int line)
{
        struct mtx *m;
        int i = 0;
#ifdef LOCK_PROFILING
        int contested = 0;
        uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
        int64_t spin_time = 0;
#endif

        if (SCHEDULER_STOPPED())
                return;

        m = mtxlock2mtx(c);

        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
        KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
            "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

#ifdef HWPMC_HOOKS
        PMC_SOFT_CALL( , , lock, failed);
#endif
        lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
#ifdef KDTRACE_HOOKS
        spin_time -= lockstat_nsecs(&m->lock_object);
#endif
        for (;;) {
                if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
                        break;
                /* Give interrupts a chance while we spin. */
                spinlock_exit();
                while (m->mtx_lock != MTX_UNOWNED) {
                        if (i++ < 10000000) {
                                cpu_spinwait();
                                continue;
                        }
                        if (i < 60000000 || kdb_active || panicstr != NULL)
                                DELAY(1);
                        else
                                _mtx_lock_spin_failed(m);
                        cpu_spinwait();
                }
                spinlock_enter();
        }
#ifdef KDTRACE_HOOKS
        spin_time += lockstat_nsecs(&m->lock_object);
#endif

        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
        KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
            "running");

        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
            contested, waittime, (file), (line));
#ifdef KDTRACE_HOOKS
        if (spin_time != 0)
                LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, spin_time);
#endif
}
#endif /* SMP */

void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
        struct mtx *m;
        uintptr_t tid;
        int i;
#ifdef LOCK_PROFILING
        int contested = 0;
        uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
        int64_t spin_time = 0;
#endif

        i = 0;
        tid = (uintptr_t)curthread;

        if (SCHEDULER_STOPPED()) {
                /*
                 * Ensure that spinlock sections are balanced even when the
                 * scheduler is stopped, since we may otherwise inadvertently
                 * re-enable interrupts while dumping core.
                 */
                spinlock_enter();
                return;
        }

#ifdef KDTRACE_HOOKS
        spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
        for (;;) {
retry:
                spinlock_enter();
                m = td->td_lock;
                KASSERT(m->mtx_lock != MTX_DESTROYED,
                    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
                KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
                    ("thread_lock() of sleep mutex %s @ %s:%d",
                    m->lock_object.lo_name, file, line));
                if (mtx_owned(m))
                        KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
            ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
                            m->lock_object.lo_name, file, line));
                WITNESS_CHECKORDER(&m->lock_object,
                    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
                for (;;) {
                        if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
                                break;
                        if (m->mtx_lock == tid) {
                                m->mtx_recurse++;
                                break;
                        }
#ifdef HWPMC_HOOKS
                        PMC_SOFT_CALL( , , lock, failed);
#endif
                        lock_profile_obtain_lock_failed(&m->lock_object,
                            &contested, &waittime);
                        /* Give interrupts a chance while we spin. */
                        spinlock_exit();
                        while (m->mtx_lock != MTX_UNOWNED) {
                                if (i++ < 10000000)
                                        cpu_spinwait();
                                else if (i < 60000000 ||
                                    kdb_active || panicstr != NULL)
                                        DELAY(1);
                                else
                                        _mtx_lock_spin_failed(m);
                                cpu_spinwait();
                                if (m != td->td_lock)
                                        goto retry;
                        }
                        spinlock_enter();
                }
                if (m == td->td_lock)
                        break;
                __mtx_unlock_spin(m);   /* does spinlock_exit() */
        }
#ifdef KDTRACE_HOOKS
        spin_time += lockstat_nsecs(&m->lock_object);
#endif
        if (m->mtx_recurse == 0)
                LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
                    m, contested, waittime, (file), (line));
        LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
        LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_time);
}

struct mtx *
thread_lock_block(struct thread *td)
{
        struct mtx *lock;

        THREAD_LOCK_ASSERT(td, MA_OWNED);
        lock = td->td_lock;
        td->td_lock = &blocked_lock;
        mtx_unlock_spin(lock);

        return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{
        mtx_assert(new, MA_OWNED);
        MPASS(td->td_lock == &blocked_lock);
        atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
        struct mtx *lock;

        mtx_assert(new, MA_OWNED);
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        lock = td->td_lock;
        td->td_lock = new;
        mtx_unlock_spin(lock);
}
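
/*
 * Together these helpers implement the scheduler's thread lock handoff:
 * td_lock is parked on blocked_lock while a thread migrates, then
 * pointed at its new containing lock.  A sketch of the pattern (callers
 * such as the sched_switch() implementations differ in detail; the
 * "newlock" name is hypothetical):
 *
 *    struct mtx *old;
 *
 *    old = thread_lock_block(td);    // td->td_lock = &blocked_lock
 *    // ... move td to its new run queue or container ...
 *    thread_lock_unblock(td, newlock);  // newlock: the container's spin
 *                                       // mutex, owned by the caller
 */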

/*
 * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
{
        struct mtx *m;
        struct turnstile *ts;

        if (SCHEDULER_STOPPED())
                return;

        m = mtxlock2mtx(c);

        if (mtx_recursed(m)) {
                if (--(m->mtx_recurse) == 0)
                        atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->lock_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
                return;
        }

        /*
         * We have to lock the chain before the turnstile so this turnstile
         * can be removed from the hash list if it is empty.
         */
        turnstile_chain_lock(&m->lock_object);
        ts = turnstile_lookup(&m->lock_object);
        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
        MPASS(ts != NULL);
        turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
        _mtx_release_lock_quick(m);

        /*
         * This turnstile is now no longer associated with the mutex.  We can
         * unlock the chain lock so a new turnstile may take its place.
         */
        turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
        turnstile_chain_unlock(&m->lock_object);
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the __mtx_unlock_spin() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
        const struct mtx *m;

        if (panicstr != NULL || dumping || SCHEDULER_STOPPED())
                return;

        m = mtxlock2mtx(c);

        switch (what) {
        case MA_OWNED:
        case MA_OWNED | MA_RECURSED:
        case MA_OWNED | MA_NOTRECURSED:
                if (!mtx_owned(m))
                        panic("mutex %s not owned at %s:%d",
                            m->lock_object.lo_name, file, line);
                if (mtx_recursed(m)) {
                        if ((what & MA_NOTRECURSED) != 0)
                                panic("mutex %s recursed at %s:%d",
                                    m->lock_object.lo_name, file, line);
                } else if ((what & MA_RECURSED) != 0) {
                        panic("mutex %s unrecursed at %s:%d",
                            m->lock_object.lo_name, file, line);
                }
                break;
        case MA_NOTOWNED:
                if (mtx_owned(m))
                        panic("mutex %s owned at %s:%d",
                            m->lock_object.lo_name, file, line);
                break;
        default:
                panic("unknown mtx_assert at %s:%d", file, line);
        }
}
#endif
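
/*
 * mtx_assert() is the conventional way for code to document its locking
 * contract under INVARIANTS, e.g. (illustrative only):
 *
 *    void
 *    foo_kick(struct foo *fp)
 *    {
 *            mtx_assert(&fp->foo_mtx, MA_OWNED);    // caller locks fp
 *            fp->foo_pending++;
 *    }
 *
 * In kernels built without INVARIANTS the assertion compiles to nothing.
 */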

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
        /*
         * Can't call kernacc() from early init386(), especially when
         * initializing Giant mutex, because some stuff in kernacc()
         * requires Giant itself.
         */
        if (!cold)
                if (!kernacc((caddr_t)m, sizeof(m),
                    VM_PROT_READ | VM_PROT_WRITE))
                        panic("Can't read and write to mutex %p", m);
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
        struct mtx_args *margs = arg;

        mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
            margs->ma_opts);
}
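
/*
 * MTX_SYSINIT() arranges for mtx_sysinit() to run during boot, letting a
 * subsystem declare a mutex that is initialized before first use with no
 * explicit call in its own init path.  Sketch (the "foo" identifiers are
 * hypothetical):
 *
 *    static struct mtx foo_mtx;
 *    MTX_SYSINIT(foo_mtx_init, &foo_mtx, "foo global", MTX_DEF);
 */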

/*
 * Mutex initialization routine; initialize lock `m' with options
 * contained in `opts' (which also select the lock class) and name
 * `name.'  The optional lock type `type' is used as a general lock
 * category name for use with witness.
 */
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
        struct mtx *m;
        struct lock_class *class;
        int flags;

        m = mtxlock2mtx(c);

        MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
            MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
        ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
            ("%s: mtx_lock not aligned for %s: %p", __func__, name,
            &m->mtx_lock));

#ifdef MUTEX_DEBUG
        /* Diagnostic and error correction */
        mtx_validate(m);
#endif

        /* Determine lock class and lock flags. */
        if (opts & MTX_SPIN)
                class = &lock_class_mtx_spin;
        else
                class = &lock_class_mtx_sleep;
        flags = 0;
        if (opts & MTX_QUIET)
                flags |= LO_QUIET;
        if (opts & MTX_RECURSE)
                flags |= LO_RECURSABLE;
        if ((opts & MTX_NOWITNESS) == 0)
                flags |= LO_WITNESS;
        if (opts & MTX_DUPOK)
                flags |= LO_DUPOK;
        if (opts & MTX_NOPROFILE)
                flags |= LO_NOPROFILE;
        if (opts & MTX_NEW)
                flags |= LO_NEW;

        /* Initialize mutex. */
        lock_init(&m->lock_object, class, name, type, flags);

        m->mtx_lock = MTX_UNOWNED;
        m->mtx_recurse = 0;
}
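
/*
 * The public mtx_init() entry point is a macro that passes the lock
 * cookie (&m->mtx_lock) to _mtx_init() above; its opts select both the
 * lock class and behavior.  Representative initializations (names
 * illustrative):
 *
 *    mtx_init(&m1, "m1", NULL, MTX_DEF);                  // sleep mutex
 *    mtx_init(&m2, "m2", NULL, MTX_DEF | MTX_RECURSE);    // may recurse
 *    mtx_init(&m3, "m3", NULL, MTX_SPIN);                 // spin mutex
 *    mtx_init(&m4, "m4", "foo class", MTX_DEF | MTX_DUPOK);
 *                           // witness type "foo class", duplicates ok
 */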

/*
 * Destroy lock `m', releasing it first if it is still held.  We don't
 * allow MTX_QUIET to be passed in as a flag here because if the
 * corresponding mtx_init() was called with MTX_QUIET set, then it will
 * already be set in the mutex's flags.
 */
void
_mtx_destroy(volatile uintptr_t *c)
{
        struct mtx *m;

        m = mtxlock2mtx(c);

        if (!mtx_owned(m))
                MPASS(mtx_unowned(m));
        else {
                MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

                /* Perform the non-mtx related part of mtx_unlock_spin(). */
                if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin)
                        spinlock_exit();
                else
                        curthread->td_locks--;

                lock_profile_release_lock(&m->lock_object);
                /* Tell witness this isn't locked to make it happy. */
                WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
                    __LINE__);
        }

        m->mtx_lock = MTX_DESTROYED;
        lock_destroy(&m->lock_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

        /* Set up turnstiles so that sleep mutexes work. */
        init_turnstiles();

        /*
         * Initialize mutexes.
         */
        mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
        mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
        blocked_lock.mtx_lock = 0xdeadc0de;     /* Always blocked. */
        mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
        mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
        mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
        mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(const struct lock_object *lock)
{
        struct thread *td;
        const struct mtx *m;

        m = (const struct mtx *)lock;

        db_printf(" flags: {");
        if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
                db_printf("SPIN");
        else
                db_printf("DEF");
        if (m->lock_object.lo_flags & LO_RECURSABLE)
                db_printf(", RECURSE");
        if (m->lock_object.lo_flags & LO_DUPOK)
                db_printf(", DUPOK");
        db_printf("}\n");
        db_printf(" state: {");
        if (mtx_unowned(m))
                db_printf("UNOWNED");
        else if (mtx_destroyed(m))
                db_printf("DESTROYED");
        else {
                db_printf("OWNED");
                if (m->mtx_lock & MTX_CONTESTED)
                        db_printf(", CONTESTED");
                if (m->mtx_lock & MTX_RECURSED)
                        db_printf(", RECURSED");
        }
        db_printf("}\n");
        if (!mtx_unowned(m) && !mtx_destroyed(m)) {
                td = mtx_owner(m);
                db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_name);
                if (mtx_recursed(m))
                        db_printf(" recursed: %d\n", m->mtx_recurse);
        }
}
#endif