FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_mutex.c
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.2/sys/kern/kern_mutex.c 122514 2003-11-11 22:07:29Z jhb $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

/*
 * Internal utility macros.
 */
#define mtx_unowned(m)  ((m)->mtx_lock == MTX_UNOWNED)

#define mtx_owner(m)    (mtx_unowned((m)) ? NULL \
        : (struct thread *)((m)->mtx_lock & MTX_FLAGMASK))

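/*
 * Illustrative sketch (not part of the original file): as the macros above
 * assume, the lock word holds either MTX_UNOWNED or the owning thread
 * pointer, with the low bits (masked off by MTX_FLAGMASK) reused for the
 * MTX_RECURSED and MTX_CONTESTED state flags.
 */
#if 0
        uintptr_t v = m->mtx_lock;                      /* raw lock word */
        struct thread *owner = (struct thread *)(v & MTX_FLAGMASK);
        int contested = (v & MTX_CONTESTED) != 0;       /* waiters queued */
        int recursed = (v & MTX_RECURSED) != 0;         /* recursive hold */
#endif
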
/*
 * Lock classes for sleep and spin mutexes.
 */
struct lock_class lock_class_mtx_sleep = {
        "sleep mutex",
        LC_SLEEPLOCK | LC_RECURSABLE
};
struct lock_class lock_class_mtx_spin = {
        "spin mutex",
        LC_SPINLOCK | LC_RECURSABLE
};

/*
 * System-wide mutexes
 */
struct mtx sched_lock;
struct mtx Giant;

#ifdef MUTEX_PROFILING
SYSCTL_NODE(_debug, OID_AUTO, mutex, CTLFLAG_RD, NULL, "mutex debugging");
SYSCTL_NODE(_debug_mutex, OID_AUTO, prof, CTLFLAG_RD, NULL, "mutex profiling");
static int mutex_prof_enable = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, enable, CTLFLAG_RW,
    &mutex_prof_enable, 0, "Enable tracing of mutex holdtime");

struct mutex_prof {
        const char      *name;
        const char      *file;
        int             line;
        uintmax_t       cnt_max;
        uintmax_t       cnt_tot;
        uintmax_t       cnt_cur;
        struct mutex_prof *next;
};

/*
 * mprof_buf is a static pool of profiling records to avoid possible
 * reentrance of the memory allocation functions.
 *
 * Note: NUM_MPROF_BUFFERS must be smaller than MPROF_HASH_SIZE.
 */
#define NUM_MPROF_BUFFERS       1000
static struct mutex_prof mprof_buf[NUM_MPROF_BUFFERS];
static int first_free_mprof_buf;
#define MPROF_HASH_SIZE         1009
static struct mutex_prof *mprof_hash[MPROF_HASH_SIZE];
/* SWAG: sbuf size = avg stat. line size * number of locks */
#define MPROF_SBUF_SIZE         (256 * 400)

static int mutex_prof_acquisitions;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
    &mutex_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
static int mutex_prof_records;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, records, CTLFLAG_RD,
    &mutex_prof_records, 0, "Number of profiling records");
static int mutex_prof_maxrecords = NUM_MPROF_BUFFERS;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
    &mutex_prof_maxrecords, 0, "Maximum number of profiling records");
static int mutex_prof_rejected;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &mutex_prof_rejected, 0, "Number of rejected profiling records");
static int mutex_prof_hashsize = MPROF_HASH_SIZE;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, hashsize, CTLFLAG_RD,
    &mutex_prof_hashsize, 0, "Hash size");
static int mutex_prof_collisions = 0;
SYSCTL_INT(_debug_mutex_prof, OID_AUTO, collisions, CTLFLAG_RD,
    &mutex_prof_collisions, 0, "Number of hash collisions");

/*
 * mprof_mtx protects the profiling buffers and the hash.
 */
static struct mtx mprof_mtx;
MTX_SYSINIT(mprof, &mprof_mtx, "mutex profiling lock", MTX_SPIN | MTX_QUIET);

static u_int64_t
nanoseconds(void)
{
        struct timespec tv;

        nanotime(&tv);
        return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
}

static int
dump_mutex_prof_stats(SYSCTL_HANDLER_ARGS)
{
        struct sbuf *sb;
        int error, i;
        static int multiplier = 1;

        if (first_free_mprof_buf == 0)
                return (SYSCTL_OUT(req, "No locking recorded",
                    sizeof("No locking recorded")));

retry_sbufops:
        sb = sbuf_new(NULL, NULL, MPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
        sbuf_printf(sb, "%6s %12s %11s %5s %s\n",
            "max", "total", "count", "avg", "name");
        /*
         * XXX this spinlock seems to be by far the largest perpetrator
         * of spinlock latency (1.6 msec on an Athlon1600 was recorded
         * even before I pessimized it further by moving the average
         * computation here).
         */
        mtx_lock_spin(&mprof_mtx);
        for (i = 0; i < first_free_mprof_buf; ++i) {
                sbuf_printf(sb, "%6ju %12ju %11ju %5ju %s:%d (%s)\n",
                    mprof_buf[i].cnt_max / 1000,
                    mprof_buf[i].cnt_tot / 1000,
                    mprof_buf[i].cnt_cur,
                    mprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
                    mprof_buf[i].cnt_tot / (mprof_buf[i].cnt_cur * 1000),
                    mprof_buf[i].file, mprof_buf[i].line, mprof_buf[i].name);
                if (sbuf_overflowed(sb)) {
                        mtx_unlock_spin(&mprof_mtx);
                        sbuf_delete(sb);
                        multiplier++;
                        goto retry_sbufops;
                }
        }
        mtx_unlock_spin(&mprof_mtx);
        sbuf_finish(sb);
        error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
        sbuf_delete(sb);
        return (error);
}
SYSCTL_PROC(_debug_mutex_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_mutex_prof_stats, "A", "Mutex profiling statistics");
#endif

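/*
 * Illustrative note (not part of the original file): with MUTEX_PROFILING
 * compiled in, the statistics above live under the sysctl tree declared
 * above (_debug_mutex_prof), so from userland one would do, e.g.:
 *
 *      sysctl debug.mutex.prof.enable=1
 *      ... exercise the system ...
 *      sysctl debug.mutex.prof.stats
 */
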
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
            ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));
        _get_sleep_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef MUTEX_PROFILING
        /* don't reset the timer when/if recursing */
        if (m->mtx_acqtime == 0) {
                m->mtx_filename = file;
                m->mtx_lineno = line;
                m->mtx_acqtime = mutex_prof_enable ? nanoseconds() : 0;
                ++mutex_prof_acquisitions;
        }
#endif
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_object.lo_class == &lock_class_mtx_sleep,
            ("mtx_unlock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);
#ifdef MUTEX_PROFILING
        if (m->mtx_acqtime != 0) {
                static const char *unknown = "(unknown)";
                struct mutex_prof *mpp;
                u_int64_t acqtime, now;
                const char *p, *q;
                volatile u_int hash;

                now = nanoseconds();
                acqtime = m->mtx_acqtime;
                m->mtx_acqtime = 0;
                if (now <= acqtime)
                        goto out;
                for (p = m->mtx_filename;
                    p != NULL && strncmp(p, "../", 3) == 0; p += 3)
                        /* nothing */ ;
                if (p == NULL || *p == '\0')
                        p = unknown;
                for (hash = m->mtx_lineno, q = p; *q != '\0'; ++q)
                        hash = (hash * 2 + *q) % MPROF_HASH_SIZE;
                mtx_lock_spin(&mprof_mtx);
                for (mpp = mprof_hash[hash]; mpp != NULL; mpp = mpp->next)
                        if (mpp->line == m->mtx_lineno &&
                            strcmp(mpp->file, p) == 0)
                                break;
                if (mpp == NULL) {
                        /* Just exit if we cannot get a trace buffer */
                        if (first_free_mprof_buf >= NUM_MPROF_BUFFERS) {
                                ++mutex_prof_rejected;
                                goto unlock;
                        }
                        mpp = &mprof_buf[first_free_mprof_buf++];
                        mpp->name = mtx_name(m);
                        mpp->file = p;
                        mpp->line = m->mtx_lineno;
                        mpp->next = mprof_hash[hash];
                        if (mprof_hash[hash] != NULL)
                                ++mutex_prof_collisions;
                        mprof_hash[hash] = mpp;
                        ++mutex_prof_records;
                }
                /*
                 * Record if the mutex has been held longer now than ever
                 * before.
                 */
                if (now - acqtime > mpp->cnt_max)
                        mpp->cnt_max = now - acqtime;
                mpp->cnt_tot += now - acqtime;
                mpp->cnt_cur++;
unlock:
                mtx_unlock_spin(&mprof_mtx);
        }
out:
#endif
        _rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
            ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
            m->mtx_object.lo_name, file, line));
#if defined(SMP) || LOCK_DEBUG > 0 || 1
        _get_spin_lock(m, curthread, opts, file, line);
#else
        critical_enter();
#endif
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_object.lo_class == &lock_class_mtx_spin,
            ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
            m->mtx_object.lo_name, file, line));
        WITNESS_UNLOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("UNLOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        mtx_assert(m, MA_OWNED);
#if defined(SMP) || LOCK_DEBUG > 0 || 1
        _rel_spin_lock(m);
#else
        critical_exit();
#endif
}

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  We do NOT handle recursion here.  If this
 * function is called on a recursed mutex, it will return failure and
 * will not recursively acquire the lock.  You are expected to know what
 * you are doing.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
        int rval;

        MPASS(curthread != NULL);

        rval = _obtain_lock(m, curthread);

        LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
        if (rval)
                WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);

        return (rval);
}

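/*
 * Illustrative use of the mtx_trylock() wrapper (hypothetical caller and
 * lock name; not part of the original file): do other work instead of
 * blocking when the lock is unavailable.
 */
#if 0
        if (mtx_trylock(&sc->sc_mtx)) {
                /* ... access data protected by sc_mtx ... */
                mtx_unlock(&sc->sc_mtx);
        } else {
                /* Lock was busy (or recursed); defer the work. */
        }
#endif
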
/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
{
        struct turnstile *ts;
        struct thread *td = curthread;
#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
        struct thread *owner;
#endif
        uintptr_t v;
#ifdef KTR
        int cont_logged = 0;
#endif

        if (mtx_owned(m)) {
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
                return;
        }

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR4(KTR_LOCK,
                    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
                    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

        while (!_obtain_lock(m, td)) {

                ts = turnstile_lookup(&m->mtx_object);
                v = m->mtx_lock;

                /*
                 * Check if the lock has been released while spinning for
                 * the turnstile chain lock.
                 */
                if (v == MTX_UNOWNED) {
                        turnstile_release(&m->mtx_object);
#ifdef __i386__
                        ia32_pause();
#endif
                        continue;
                }

                /*
                 * The mutex was marked contested on release.  This means that
                 * there are other threads blocked on it.  Grab ownership of
                 * it and propagate its priority to the current thread if
                 * necessary.
                 */
                if (v == MTX_CONTESTED) {
                        MPASS(ts != NULL);
                        m->mtx_lock = (uintptr_t)td | MTX_CONTESTED;
                        turnstile_claim(ts);
                        return;
                }

                /*
                 * If the mutex isn't already contested and a failure occurs
                 * setting the contested bit, the mutex was either released
                 * or the state of the MTX_RECURSED bit changed.
                 */
                if ((v & MTX_CONTESTED) == 0 &&
                    !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
                    (void *)(v | MTX_CONTESTED))) {
                        turnstile_release(&m->mtx_object);
#ifdef __i386__
                        ia32_pause();
#endif
                        continue;
                }

#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
                /*
                 * If the current owner of the lock is executing on another
                 * CPU, spin instead of blocking.
                 */
                owner = (struct thread *)(v & MTX_FLAGMASK);
                if (m != &Giant && TD_IS_RUNNING(owner)) {
                        turnstile_release(&m->mtx_object);
                        while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
#ifdef __i386__
                                ia32_pause();
#endif
                        }
                        continue;
                }
#endif  /* SMP && ADAPTIVE_MUTEXES */

                /*
                 * We definitely must sleep for this lock.
                 */
                mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
                if (!cont_logged) {
                        CTR6(KTR_CONTENTION,
                            "contention: %p at %s:%d wants %s, taken by %s:%d",
                            td, file, line, m->mtx_object.lo_name,
                            WITNESS_FILE(&m->mtx_object),
                            WITNESS_LINE(&m->mtx_object));
                        cont_logged = 1;
                }
#endif

                /*
                 * Block on the turnstile.
                 */
                turnstile_wait(ts, &m->mtx_object, mtx_owner(m));
        }

#ifdef KTR
        if (cont_logged) {
                CTR4(KTR_CONTENTION,
                    "contention end: %s acquired by %p at %s:%d",
                    m->mtx_object.lo_name, td, file, line);
        }
#endif
        return;
}

/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
{
        int i = 0;

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

        for (;;) {
                if (_obtain_lock(m, curthread))
                        break;

                /* Give interrupts a chance while we spin. */
                critical_exit();
                while (m->mtx_lock != MTX_UNOWNED) {
                        if (i++ < 10000000) {
#ifdef __i386__
                                ia32_pause();
#endif
                                continue;
                        }
                        if (i < 60000000)
                                DELAY(1);
#ifdef DDB
                        else if (!db_active) {
#else
                        else {
#endif
                                printf("spin lock %s held by %p for > 5 seconds\n",
                                    m->mtx_object.lo_name, (void *)m->mtx_lock);
#ifdef WITNESS
                                witness_display_spinlock(&m->mtx_object,
                                    mtx_owner(m));
#endif
                                panic("spin lock held too long");
                        }
#ifdef __i386__
                        ia32_pause();
#endif
                }
                critical_enter();
        }

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

        return;
}

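/*
 * Illustrative spin mutex usage (not part of the original file), mirroring
 * how mprof_mtx and sched_lock are used elsewhere in this file: spin
 * mutexes are taken with mtx_lock_spin() and held only across short
 * sections that never sleep.
 */
#if 0
        mtx_lock_spin(&sched_lock);
        /* ... short critical section; must not sleep ... */
        mtx_unlock_spin(&sched_lock);
#endif
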
/*
 * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
 *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
 */
void
_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
{
        struct turnstile *ts;
        struct thread *td, *td1;

        if (mtx_recursed(m)) {
                if (--(m->mtx_recurse) == 0)
                        atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
                return;
        }

        ts = turnstile_lookup(&m->mtx_object);
        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);

#if defined(SMP) && defined(ADAPTIVE_MUTEXES)
        if (ts == NULL) {
                _release_lock_quick(m);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
                turnstile_release(&m->mtx_object);
                return;
        }
#else
        MPASS(ts != NULL);
#endif
        /* XXX */
        td1 = turnstile_head(ts);
        if (turnstile_signal(ts)) {
                _release_lock_quick(m);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
        } else {
                m->mtx_lock = MTX_CONTESTED;
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p still contested",
                            m);
        }
        turnstile_unpend(ts);

        /*
         * XXX: This is just a hack until preemption is done.  However,
         * once preemption is done we need to either wrap the
         * turnstile_signal() and release of the actual lock in an
         * extra critical section or change the preemption code to
         * always just set a flag and never do instant-preempts.
         */
        td = curthread;
        if (td->td_critnest > 0 || td1->td_priority >= td->td_priority)
                return;
        mtx_lock_spin(&sched_lock);
        if (!TD_IS_RUNNING(td1)) {
#ifdef notyet
                if (td->td_ithd != NULL) {
                        struct ithd *it = td->td_ithd;

                        if (it->it_interrupted) {
                                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                                        CTR2(KTR_LOCK,
                                            "_mtx_unlock_sleep: %p interrupted %p",
                                            it, it->it_interrupted);
                                intr_thd_fixup(it);
                        }
                }
#endif
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK,
                            "_mtx_unlock_sleep: %p switching out lock=%p", m,
                            (void *)m->mtx_lock);

                td->td_proc->p_stats->p_ru.ru_nivcsw++;
                mi_switch();
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
                            m, (void *)m->mtx_lock);
        }
        mtx_unlock_spin(&sched_lock);

        return;
}

/*
 * All the unlocking of MTX_SPIN locks is done inline.
 * See the _rel_spin_lock() macro for the details.
 */

/*
 * The backing function for the INVARIANTS-enabled mtx_assert()
 */
#ifdef INVARIANT_SUPPORT
void
_mtx_assert(struct mtx *m, int what, const char *file, int line)
{

        if (panicstr != NULL)
                return;
        switch (what) {
        case MA_OWNED:
        case MA_OWNED | MA_RECURSED:
        case MA_OWNED | MA_NOTRECURSED:
                if (!mtx_owned(m))
                        panic("mutex %s not owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                if (mtx_recursed(m)) {
                        if ((what & MA_NOTRECURSED) != 0)
                                panic("mutex %s recursed at %s:%d",
                                    m->mtx_object.lo_name, file, line);
                } else if ((what & MA_RECURSED) != 0) {
                        panic("mutex %s unrecursed at %s:%d",
                            m->mtx_object.lo_name, file, line);
                }
                break;
        case MA_NOTOWNED:
                if (mtx_owned(m))
                        panic("mutex %s owned at %s:%d",
                            m->mtx_object.lo_name, file, line);
                break;
        default:
                panic("unknown mtx_assert at %s:%d", file, line);
        }
}
#endif

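/*
 * Illustrative mtx_assert() use (hypothetical caller; not part of the
 * original file): functions document their locking contract by asserting
 * ownership on entry, which the INVARIANTS-enabled backing function above
 * actually checks.
 */
#if 0
static void
foo_modify(struct foo_softc *sc)
{
        mtx_assert(&sc->sc_mtx, MA_OWNED);
        /* ... caller must already hold sc_mtx ... */
}
#endif
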
/*
 * The MUTEX_DEBUG-enabled mtx_validate()
 *
 * Most of these checks have been moved off into the LO_INITIALIZED flag
 * maintained by the witness code.
 */
#ifdef MUTEX_DEBUG

void    mtx_validate(struct mtx *);

void
mtx_validate(struct mtx *m)
{

/*
 * XXX: When kernacc() does not require Giant we can reenable this check
 */
#ifdef notyet
/*
 * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly
 * we can re-enable the kernacc() checks.
 */
#ifndef __alpha__
        /*
         * Can't call kernacc() from early init386(), especially when
         * initializing Giant mutex, because some stuff in kernacc()
         * requires Giant itself.
         */
        if (!cold)
                if (!kernacc((caddr_t)m, sizeof(*m),
                    VM_PROT_READ | VM_PROT_WRITE))
                        panic("Can't read and write to mutex %p", m);
#endif
#endif
}
#endif

/*
 * General init routine used by the MTX_SYSINIT() macro.
 */
void
mtx_sysinit(void *arg)
{
        struct mtx_args *margs = arg;

        mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts);
}

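/*
 * Illustrative MTX_SYSINIT() use (hypothetical names; not part of the
 * original file), mirroring the mprof_mtx declaration earlier in this
 * file: the macro arranges for mtx_sysinit() above to run during boot.
 */
#if 0
static struct mtx example_mtx;
MTX_SYSINIT(example, &example_mtx, "example lock", MTX_DEF);
#endif
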
/*
 * Mutex initialization routine; initialize lock `m' with name `name.'
 * The lock class (spin or sleep) and any other options are taken from
 * `opts'.  The optional lock type `type' is used as a general lock
 * category name for use with witness.
 */
void
mtx_init(struct mtx *m, const char *name, const char *type, int opts)
{
        struct lock_object *lock;

        MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
            MTX_NOWITNESS | MTX_DUPOK)) == 0);

#ifdef MUTEX_DEBUG
        /* Diagnostic and error correction */
        mtx_validate(m);
#endif

        lock = &m->mtx_object;
        KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
            ("mutex \"%s\" %p already initialized", name, m));
        bzero(m, sizeof(*m));
        if (opts & MTX_SPIN)
                lock->lo_class = &lock_class_mtx_spin;
        else
                lock->lo_class = &lock_class_mtx_sleep;
        lock->lo_name = name;
        lock->lo_type = type != NULL ? type : name;
        if (opts & MTX_QUIET)
                lock->lo_flags = LO_QUIET;
        if (opts & MTX_RECURSE)
                lock->lo_flags |= LO_RECURSABLE;
        if ((opts & MTX_NOWITNESS) == 0)
                lock->lo_flags |= LO_WITNESS;
        if (opts & MTX_DUPOK)
                lock->lo_flags |= LO_DUPOK;

        m->mtx_lock = MTX_UNOWNED;

        LOCK_LOG_INIT(lock, opts);

        WITNESS_INIT(lock);
}

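/*
 * Illustrative sleep mutex lifecycle (hypothetical softc and names; not
 * part of the original file): initialize once with mtx_init(), bracket
 * accesses to the protected data, and destroy on teardown.
 */
#if 0
struct foo_softc {
        struct mtx      sc_mtx;
        int             sc_count;
};

static void
foo_attach(struct foo_softc *sc)
{
        mtx_init(&sc->sc_mtx, "foo softc", NULL, MTX_DEF);
}

static void
foo_bump(struct foo_softc *sc)
{
        mtx_lock(&sc->sc_mtx);
        sc->sc_count++;
        mtx_unlock(&sc->sc_mtx);
}

static void
foo_detach(struct foo_softc *sc)
{
        mtx_destroy(&sc->sc_mtx);
}
#endif
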
/*
 * Destroy lock `m'.  We don't allow MTX_QUIET to be passed in as a flag
 * here because if the corresponding mtx_init() was called with MTX_QUIET
 * set, then it will already be set in the mutex's flags.
 */
void
mtx_destroy(struct mtx *m)
{

        LOCK_LOG_DESTROY(&m->mtx_object, 0);

        if (!mtx_owned(m))
                MPASS(mtx_unowned(m));
        else {
                MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

                /* Tell witness this isn't locked to make it happy. */
                WITNESS_UNLOCK(&m->mtx_object, LOP_EXCLUSIVE, __FILE__,
                    __LINE__);
        }

        WITNESS_DESTROY(&m->mtx_object);
}

/*
 * Initialize the mutex code and system mutexes.  This is called from the MD
 * startup code prior to mi_startup().  The per-CPU data space needs to be
 * set up before this is called.
 */
void
mutex_init(void)
{

        /* Setup thread0 so that mutexes work. */
        LIST_INIT(&thread0.td_contested);

        /* Setup turnstiles so that sleep mutexes work. */
        init_turnstiles();

        /*
         * Initialize mutexes.
         */
        mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
        mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
        mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
        mtx_lock(&Giant);
}