sys/kern/kern_mutex.c
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.2/sys/kern/kern_mutex.c 164286 2006-11-14 20:42:41Z cvs2svn $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_mprof.h"
#include "opt_mutex_wake_all.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Force MUTEX_WAKE_ALL for now.
 * Single-thread wakeup needs fixes to avoid race conditions with
 * priority inheritance.
 */
#ifndef MUTEX_WAKE_ALL
#define MUTEX_WAKE_ALL
#endif

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)  ((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)    (mtx_unowned((m)) ? NULL \
        : (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))
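
/*
 * A note on the encoding the macros above rely on: mtx_lock holds either
 * MTX_UNOWNED or the owning thread pointer with the low flag bits
 * (MTX_RECURSED, MTX_CONTESTED) OR'd in.  Thread structures are aligned
 * well past those bits, so masking with MTX_FLAGMASK recovers the owner.
 * An illustrative (made-up) 32-bit value:
 *
 *      mtx_lock = 0xc2345601 = thread pointer 0xc2345600 | a contested bit
 *      mtx_owner(m) masks the flag bits off and yields 0xc2345600
 */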

#ifdef DDB
static void     db_show_mtx(struct lock_object *lock);
#endif

/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
        "sleep mutex",
        LC_SLEEPLOCK | LC_RECURSABLE,
#ifdef DDB
        db_show_mtx
#endif
};
struct lock_class lock_class_mtx_spin = {
        "spin mutex",
        LC_SPINLOCK | LC_RECURSABLE,
#ifdef DDB
        db_show_mtx
#endif
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
        const char      *name;
        const char      *file;
        int             line;
        uintmax_t       cnt_max;
        uintmax_t       cnt_tot;
        uintmax_t       cnt_cur;
        uintmax_t       cnt_contest_holding;
        uintmax_t       cnt_contest_locking;
        struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#ifdef MPROF_BUFFERS
#define NUM_MPROF_BUFFERS       MPROF_BUFFERS
#else
#define NUM_MPROF_BUFFERS       1000
#endif
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#ifndef MPROF_HASH_SIZE
#define MPROF_HASH_SIZE         1009
#endif
#if NUM_MPROF_BUFFERS >= MPROF_HASH_SIZE
#error NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE
#endif
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define MPROF_SBUF_SIZE         (256 * 400)

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
165 static int mutex_prof_records;
166 SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
167 &mutex_prof_records, 0, "Number of profiling records");
168 static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
169 SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
170 &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
171 static int mutex_prof_rejected;
172 SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
173 &mutex_prof_rejected, 0, "Number of rejected profiling records");
174 static int mutex_prof_hashsize = MPROF_HASH_SIZE;
175 SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
176 &mutex_prof_hashsize, 0, "Hash size");
177 static int mutex_prof_collisions = 0;
178 SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
179 &mutex_prof_collisions, 0, "Number of hash collisions");
180
181 /*
182 * mprof_mtx protects the profiling buffers and the hash.
183 */
184 static struct mtx mprof_mtx;
185 MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);
186
187 static u_int64_t
188 nanoseconds(void)
189 {
190 struct timespec tv;
191
192 nanotime(&tv);
193 return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
194 }
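
/*
 * A quick worked example of the conversion above: tv_sec = 2 and
 * tv_nsec = 500000 yields 2 * 1000000000 + 500000 = 2000500000 ns.
 * The u_int64_t cast matters: it forces the multiplication to be done
 * in 64 bits, where a plain tv_sec * 1000000000 would overflow 32-bit
 * arithmetic for tv_sec >= 3 on ILP32 platforms.
 */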

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
        struct sbuf *sb;
        int error, i;
        static int multiplier = 1;

        if (first_free_mprof_buf == 0)
                return (SYSCTL_OUT(req, "No locking recorded",
                    sizeof("No locking recorded")));

retry_sbufops:
        sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
        sbuf_printf(sb, "\n%6s %12s %11s %5s %12s %12s %s\n",
            "max", "total", "count", "avg", "cnt_hold", "cnt_lock", "name");
        /*
         * XXX this spinlock seems to be by far the largest perpetrator
         * of spinlock latency (1.6 msec on an Athlon1600 was recorded
         * even before I pessimized it further by moving the average
         * computation here).
         */
        mtx_lock_spin(&mprof_mtx);
        for (i = 0; i < first_free_mprof_buf; ++i) {
                sbuf_printf(sb, "%6ju %12ju %11ju %5ju %12ju %12ju %s:%d (%s)\n",
                    mprof_buf[i].cnt_max / 1000,
                    mprof_buf[i].cnt_tot / 1000,
                    mprof_buf[i].cnt_cur,
                    mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
                    mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
                    mprof_buf[i].cnt_contest_holding,
                    mprof_buf[i].cnt_contest_locking,
                    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
                if (sbuf_overflowed(sb)) {
                        mtx_unlock_spin(&mprof_mtx);
                        sbuf_delete(sb);
                        multiplier++;
                        goto retry_sbufops;
                }
        }
        mtx_unlock_spin(&mprof_mtx);
        sbuf_finish(sb);
        error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
        return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");

static int
reset_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
        int error, v;

        if (first_free_mprof_buf == 0)
                return (0);

        v = 0;
        error = sysctl_handle_int(oidp, &v, 0, req);
        if (error)
                return (error);
        if (req->newptr == NULL)
                return (error);
        if (v == 0)
                return (0);

        mtx_lock_spin(&mprof_mtx);
        bzero(mprof_buf, sizeof(*mprof_buf) * first_free_mprof_buf);
        bzero(mprof_hash, sizeof(struct mutex_prof *) * MPROF_HASH_SIZE);
        first_free_mprof_buf = 0;
        mtx_unlock_spin(&mprof_mtx);
        return (0);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_mutex_prof_stats, "I", "Reset mutex profiling statistics");
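
/*
 * Typical use of the knobs above from userland, assuming a kernel built
 * with `options MUTEX_PROFILING' (a sketch; the OIDs follow the SYSCTL
 * declarations in this file):
 *
 *      sysctl debug.mutex.prof.enable=1        (start collecting)
 *      (run the workload of interest)
 *      sysctl debug.mutex.prof.stats           (dump per-site hold times)
 *      sysctl debug.mutex.prof.reset=1         (zero the record pool)
 */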
#endif

/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
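
/*
 * For reference, a minimal sketch of the usual calling sequence from a
 * module's point of view; `example_softc' and its fields are hypothetical:
 *
 *      struct example_softc {
 *              struct mtx      sc_mtx;
 *              int             sc_count;
 *      };
 *
 *      mtx_init(&sc->sc_mtx, "example softc", NULL, MTX_DEF);
 *      mtx_lock(&sc->sc_mtx);
 *      sc->sc_count++;
 *      mtx_unlock(&sc->sc_mtx);
 *      mtx_destroy(&sc->sc_mtx);
 */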
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
            ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));
        WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line);
        _get_sleep_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        curthread->td_locks++;
#ifdef MUTEX_PROFILING
        /* don't reset the timer when/if recursing */
        if (m->mtx_acqtime == 0) {
                m->mtx_filename = file;
                m->mtx_lineno = line;
                m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
                ++mutex_prof_acquisitions;
        }
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
            ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));
        curthread->td_locks--;
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
        if (m->mtx_acqtime != 0) {
                static const char *unknown = "(unknown)";
                struct mutex_prof *mpp;
                u_int64_t acqtime, now;
                const char *p, *q;
                volatile u_int hash;

                now = nanoseconds();
                acqtime = m->mtx_acqtime;
                m->mtx_acqtime = 0;
                if (now <= acqtime)
                        goto out;
                for (p = m->mtx_filename;
                    p != NULL && strncmp(p, "../", 3) == 0; p += 3)
                        /* nothing */ ;
                if (p == NULL || *p == '\0')
                        p = unknown;
                for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
                        hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
                mtx_lock_spin(&mprof_mtx);
                for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
                        if (mpp->line == m->mtx_lineno &&
                            strcmp(mpp->file, p) == 0)
                                break;
                if (mpp == NULL) {
                        /* Just exit if we cannot get a trace buffer */
                        if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
                                ++mutex_prof_rejected;
                                goto unlock;
                        }
                        mpp = &mprof_buf[first_free_mprof_buf++];
                        mpp->name = mtx_name(m);
                        mpp->file = p;
                        mpp->line = m->mtx_lineno;
                        mpp->next = mprof_hash[hash];
                        if (mprof_hash[hash] != NULL)
                                ++mutex_prof_collisions;
                        mprof_hash[hash] = mpp;
                        ++mutex_prof_records;
                }
                /*
                 * Record if the mutex has been held longer now than ever
                 * before.
                 */
                if (now - acqtime > mpp->cnt_max)
                        mpp->cnt_max = now - acqtime;
                mpp->cnt_tot += now - acqtime;
                mpp->cnt_cur++;
                /*
                 * There's a small race, really we should cmpxchg
                 * 0 with the current value, but that would bill
                 * the contention to the wrong lock instance if
                 * it followed this also.
                 */
                mpp->cnt_contest_holding += m->mtx_contest_holding;
                m->mtx_contest_holding = 0;
                mpp->cnt_contest_locking += m->mtx_contest_locking;
                m->mtx_contest_locking = 0;
unlock:
                mtx_unlock_spin(&mprof_mtx);
        }
out:
#endif
        _rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
            ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
            m->mtx_object.lo_name, file, line));
        WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line);
        _get_spin_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
            ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
            m->mtx_object.lo_name, file, line));
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);
        _rel_spin_lock(m);
}
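
/*
 * Sketch of typical spin mutex use from code that shares state with an
 * interrupt handler and therefore must not sleep (names hypothetical):
 *
 *      static struct mtx hw_mtx;
 *      mtx_init(&hw_mtx, "hw state", NULL, MTX_SPIN);
 *
 *      mtx_lock_spin(&hw_mtx);         (also blocks interrupts)
 *      ... touch the shared hardware state, never sleeping ...
 *      mtx_unlock_spin(&hw_mtx);
 */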

/*
 * The important part of mtx_trylock{,_flags}().
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock; this only
 * succeeds if the mutex was initialized with MTX_RECURSE, otherwise the
 * attempt fails and 0 is returned.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
        int rval;

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
            ("mtx_trylock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));

        if (mtx_owned(m) && (m->mtx_object.lo_flags & LO_RECURSABLE) != 0) {
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                rval = 1;
        } else
                rval = _obtain_lock(m, (uintptr_t)curthread);

        LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
        if (rval) {
                WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                curthread->td_locks++;
        }

        return (rval);
}
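
/*
 * A common mtx_trylock() pattern (sketch): do opportunistic work if the
 * lock is free and fall back instead of blocking; drain_queue() and
 * schedule_deferred_drain() are hypothetical helpers:
 *
 *      if (mtx_trylock(&sc->sc_mtx)) {
 *              drain_queue(sc);
 *              mtx_unlock(&sc->sc_mtx);
 *      } else
 *              schedule_deferred_drain(sc);
 */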

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
        volatile struct thread *owner;
#endif
        uintptr_t v;
#ifdef KTR
        int cont_logged = 0;
#endif
#ifdef MUTEX_PROFILING
        int contested;
#endif

        if (mtx_owned(m)) {
                KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
            ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
                    m->mtx_object.lo_name, file, line));
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
                return;
        }

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR4(KTR_LOCK,
                    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
                    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

#ifdef MUTEX_PROFILING
        contested = 0;
#endif
        while (!_obtain_lock(m, tid)) {
#ifdef MUTEX_PROFILING
                contested = 1;
                atomic_add_int(&m->mtx_contest_holding, 1);
#endif
                turnstile_lock(&m->mtx_object);
                v = m->mtx_lock;

                /*
                 * Check if the lock has been released while spinning for
                 * the turnstile chain lock.
                 */
                if (v == MTX_UNOWNED) {
                        turnstile_release(&m->mtx_object);
                        cpu_spinwait();
                        continue;
                }

#ifdef MUTEX_WAKE_ALL
                MPASS(v != MTX_CONTESTED);
#else
                /*
                 * The mutex was marked contested on release. This means that
                 * there are other threads blocked on it.  Grab ownership of
                 * it and propagate its priority to the current thread if
                 * necessary.
                 */
                if (v == MTX_CONTESTED) {
                        m->mtx_lock = tid | MTX_CONTESTED;
                        turnstile_claim(&m->mtx_object);
                        break;
                }
#endif

                /*
                 * If the mutex isn't already contested and a failure occurs
                 * setting the contested bit, the mutex was either released
                 * or the state of the MTX_RECURSED bit changed.
                 */
                if ((v & MTX_CONTESTED) == 0 &&
                    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
                        turnstile_release(&m->mtx_object);
                        cpu_spinwait();
                        continue;
                }

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
                /*
                 * If the current owner of the lock is executing on another
                 * CPU, spin instead of blocking.
                 */
                owner = (struct thread *)(v & MTX_FLAGMASK);
#ifdef ADAPTIVE_GIANT
                if (TD_IS_RUNNING(owner)) {
#else
                if (m != &Giant && TD_IS_RUNNING(owner)) {
#endif
                        turnstile_release(&m->mtx_object);
                        while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
                                cpu_spinwait();
                        }
                        continue;
                }
#endif  /* SMP && !NO_ADAPTIVE_MUTEXES */

                /*
                 * We definitely must sleep for this lock.
                 */
                mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
                if (!cont_logged) {
                        CTR6(KTR_CONTENTION,
                            "contention: %p at %s:%d wants %s, taken by %s:%d",
                            (void *)tid, file, line, m->mtx_object.lo_name,
                            WITNESS_FILE(&m->mtx_object),
                            WITNESS_LINE(&m->mtx_object));
                        cont_logged = 1;
                }
#endif

                /*
                 * Block on the turnstile.
                 */
                turnstile_wait(&m->mtx_object, mtx_owner(m));
        }

#ifdef KTR
        if (cont_logged) {
                CTR4(KTR_CONTENTION,
                    "contention end: %s acquired by %p at %s:%d",
                    m->mtx_object.lo_name, (void *)tid, file, line);
        }
#endif
#ifdef MUTEX_PROFILING
        if (contested)
                m->mtx_contest_locking++;
        m->mtx_contest_holding = 0;
#endif
        return;
}

#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
        int i = 0;

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

        for (;;) {
                if (_obtain_lock(m, tid))
                        break;

                /* Give interrupts a chance while we spin. */
                spinlock_exit();
                while (m->mtx_lock != MTX_UNOWNED) {
                        if (i++ < 10000000) {
                                cpu_spinwait();
                                continue;
                        }
                        if (i < 60000000)
                                DELAY(1);
                        else if (!kdb_active && !panicstr) {
                                printf("spin lock %s held by %p for > 5 seconds\n",
                                    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef WITNESS
                                witness_display_spinlock(&m->mtx_object,
                                    mtx_owner(m));
#endif
                                panic("spin lock held too long");
                        }
                        cpu_spinwait();
                }
                spinlock_enter();
        }

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

        return;
}
#endif /* SMP */

/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
        struct turnstile *ts;
#ifndef PREEMPTION
        struct thread *td, *td1;
#endif

        if (mtx_recursed(m)) {
                if (--(m->mtx_recurse) == 0)
                        atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
                return;
        }

        turnstile_lock(&m->mtx_object);
        ts = turnstile_lookup(&m->mtx_object);
        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
        if (ts == NULL) {
                _release_lock_quick(m);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
                turnstile_release(&m->mtx_object);
                return;
        }
#else
        MPASS(ts != NULL);
#endif
#ifndef PREEMPTION
        /* XXX */
        td1 = turnstile_head(ts);
#endif
#ifdef MUTEX_WAKE_ALL
        turnstile_broadcast(ts);
        _release_lock_quick(m);
#else
        if (turnstile_signal(ts)) {
                _release_lock_quick(m);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
        } else {
                m->mtx_lock = MTX_CONTESTED;
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
                            m);
        }
#endif
        turnstile_unpend(ts);

#ifndef PREEMPTION
        /*
         * XXX: This is just a hack until preemption is done.  However,
         * once preemption is done we need to either wrap the
         * turnstile_signal() and release of the actual lock in an
         * extra critical section or change the preemption code to
         * always just set a flag and never do instant-preempts.
         */
        td = curthread;
        if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
                return;
        mtx_lock_spin(&sched_lock);
        if (!TD_IS_RUNNING(td1)) {
#ifdef notyet
                if (td->td_ithd != NULL) {
                        struct ithd *it = td->td_ithd;

                        if (it->it_interrupted) {
                                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                                        CTR2(KTR_LOCK,
                                    "_mtx_unlock_sleep: %p interrupted %p",
                                            it, it->it_interrupted);
                                intr_thd_fixup(it);
                        }
                }
#endif
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK,
                            "_mtx_unlock_sleep: %p switching out lock=%p", m,
                            (void *)m->mtx_lock);

                mi_switch(SW_INVOL, NULL);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
                            m, (void *)m->mtx_lock);
        }
        mtx_unlock_spin(&sched_lock);
#endif

        return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

        if (panicstr != NULL || dumping)
                return;
        switch (what) {
        case MA_OWNED:
        case MA_OWNED | MA_RECURSED:
        case MA_OWNED | MA_NOTRECURSED:
                if (!mtx_owned(m))
                        panic("mutex %s not owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                if (mtx_recursed(m)) {
                        if ((what & MA_NOTRECURSED) != 0)
                                panic("mutex %s recursed at %s:%d",
                                    m->mtx_object.lo_name, file, line);
                } else if ((what & MA_RECURSED) != 0) {
                        panic("mutex %s unrecursed at %s:%d",
                            m->mtx_object.lo_name, file, line);
                }
                break;
        case MA_NOTOWNED:
                if (mtx_owned(m))
                        panic("mutex %s owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                break;
        default:
                panic("unknown mtx_assert at %s:%d", file, line);
        }
}
#endif
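
/*
 * mtx_assert() is the usual way to document and enforce a locking
 * protocol at function entry.  A sketch (the function and softc are
 * hypothetical):
 *
 *      static void
 *      example_update(struct example_softc *sc)
 *      {
 *              mtx_assert(&sc->sc_mtx, MA_OWNED);
 *              sc->sc_count++;
 *      }
 *
 * The assertion compiles away entirely unless the kernel is built with
 * INVARIANTS (callers) and INVARIANT_SUPPORT (this backing function).
 */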

/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void    mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
        /*
         * XXX - When kernacc() is fixed on the alpha to handle K0_SEG
         * memory properly we can re-enable the kernacc() checks.
         */
#ifndef __alpha__
        /*
         * Can't call kernacc() from early init386(), especially when
         * initializing Giant mutex, because some stuff in kernacc()
         * requires Giant itself.
         */
        if (!cold)
                if (!kernacc((caddr_t)m, sizeof(*m),
                    VM_PROT_READ | VM_PROT_WRITE))
                        panic("Can't read and write to mutex %p", m);
#endif
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
        struct mtx_args *margs = arg;

        mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}
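
/*
 * MTX_SYSINIT() arranges for mtx_sysinit() to run during boot so that a
 * statically allocated mutex is initialized before first use; mprof_mtx
 * above is a real example.  A sketch with hypothetical names:
 *
 *      static struct mtx foo_mtx;
 *      MTX_SYSINIT(foo_mtx_init, &foo_mtx, "foo global lock", MTX_DEF);
 */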

/*
 * Mutex initialization routine; initialize lock `m' with name `name'
 * and the lock class and options contained in `opts'.  The optional
 * string `type' is used as a general lock category name for use with
 * witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
        struct lock_class *class;
        int flags;

        MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
            MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
        /* Diagnostic and error correction */
        mtx_validate(m);
#endif

        /* Determine lock class and lock flags. */
        if (opts & MTX_SPIN)
                class = &lock_class_mtx_spin;
        else
                class = &lock_class_mtx_sleep;
        flags = 0;
        if (opts & MTX_QUIET)
                flags |= LO_QUIET;
        if (opts & MTX_RECURSE)
                flags |= LO_RECURSABLE;
        if ((opts & MTX_NOWITNESS) == 0)
                flags |= LO_WITNESS;
        if (opts & MTX_DUPOK)
                flags |= LO_DUPOK;

        /* Initialize mutex. */
        m->mtx_lock = MTX_UNOWNED;
        m->mtx_recurse = 0;
#ifdef MUTEX_PROFILING
        m->mtx_acqtime = 0;
        m->mtx_filename = NULL;
        m->mtx_lineno = 0;
        m->mtx_contest_holding = 0;
        m->mtx_contest_locking = 0;
#endif

        lock_init(&m->mtx_object, class, name, type, flags);
}
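
/*
 * An illustrative mtx_init() call (hypothetical driver): per-instance
 * locks get unique names but share a witness `type', so witness treats
 * them as one lock-order class; MTX_DUPOK permits holding two of them
 * at once:
 *
 *      mtx_init(&sc->sc_mtx, device_get_nameunit(dev),
 *          "example driver softc", MTX_DEF | MTX_DUPOK);
 */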

/*
 * Destroy lock `m' so that it may no longer be used.  We don't allow
 * MTX_QUIET to be passed in as a flag here because if the corresponding
 * mtx_init() was called with MTX_QUIET set, then it will already be set
 * in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

        if (!mtx_owned(m))
                MPASS(mtx_unowned(m));
        else {
                MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

                /* Perform the non-mtx related part of mtx_unlock_spin(). */
                if (LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin)
                        spinlock_exit();
                else
                        curthread->td_locks--;

                /* Tell witness this isn't locked to make it happy. */
                WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
                    __LINE__);
        }

        m->mtx_lock = MTX_DESTROYED;
        lock_destroy(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * setup before this is called.
 */
void
mutex_init(void)
{

        /* Setup turnstiles so that sleep mutexes work. */
        init_turnstiles();

        /*
         * Initialize mutexes.
         */
        mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
        mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
        mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
        mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
        mtx_lock(&Giant);
}

#ifdef DDB
void
db_show_mtx(struct lock_object *lock)
{
        struct thread *td;
        struct mtx *m;

        m = (struct mtx *)lock;

        db_printf(" flags: {");
        if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
                db_printf("SPIN");
        else
                db_printf("DEF");
        if (m->mtx_object.lo_flags & LO_RECURSABLE)
                db_printf(", RECURSE");
        if (m->mtx_object.lo_flags & LO_DUPOK)
                db_printf(", DUPOK");
        db_printf("}\n");
        db_printf(" state: {");
        if (mtx_unowned(m))
                db_printf("UNOWNED");
        else {
                db_printf("OWNED");
                if (m->mtx_lock & MTX_CONTESTED)
                        db_printf(", CONTESTED");
                if (m->mtx_lock & MTX_RECURSED)
                        db_printf(", RECURSED");
        }
        db_printf("}\n");
        if (!mtx_unowned(m)) {
                td = mtx_owner(m);
                db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
                if (mtx_recursed(m))
                        db_printf(" recursed: %d\n", m->mtx_recurse);
        }
}
#endif
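
/*
 * From the DDB prompt this routine is reached through `show lock',
 * which prints the class and name before calling the class hook above.
 * Roughly (illustrative addresses and output):
 *
 *      db> show lock 0xc0a1b2c4
 *       class: sleep mutex
 *       name: Giant
 *       flags: {DEF, RECURSE}
 *       state: {OWNED}
 *       owner: 0xc6a9f000 (tid 100001, pid 1, "init")
 */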