/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */
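
/*
 * Design overview (a summary derived from the code below; see rmlock(9)):
 * read-mostly (rm) locks heavily favor readers.  In the common case a
 * read acquisition only links an rm_priotracker onto a per-CPU queue and
 * never writes to memory shared with other CPUs.  A CPU holds a "read
 * token" for a lock when its bit is clear in rm_writecpus.  A writer
 * reclaims all tokens by taking the backing mutex/sx lock, setting
 * rm_writecpus to all_cpus, and running rm_cleanIPI() on the CPUs that
 * held tokens, which moves active readers onto rm_activeReaders where
 * the writer can wait for them via a turnstile.
 */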

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/12.0/sys/kern/kern_rmlock.c 334546 2018-06-02 22:37:53Z mjg $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * A cookie to mark destroyed rmlocks.  This is stored in the head of
 * rm_activeReaders.
 */
#define	RM_DESTROYED	((void *)0xdead)

#define	rm_destroyed(rm)						\
	(LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)

#define	RMPF_ONQUEUE	1
#define	RMPF_SIGNAL	2

#ifndef INVARIANTS
#define	_rm_assert(c, what, file, line)
#endif

static void	assert_rm(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_rm(const struct lock_object *lock);
#endif
static void	lock_rm(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_rm(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rm(struct lock_object *lock);

struct lock_class lock_class_rm = {
	.lc_name = "rm",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

struct lock_class lock_class_rm_sleepable = {
	.lc_name = "sleepable rm",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

static void
assert_rm(const struct lock_object *lock, int what)
{

	rm_assert((const struct rmlock *)lock, what);
}

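/*
 * The lock_class "how" cookie encodes the lock mode: 0 means write
 * locked, any other value is the address of the rm_priotracker used
 * for a read acquisition.  unlock_rm() recovers the tracker belonging
 * to curthread from the per-CPU queue so the cookie can later be
 * handed back to lock_rm() when the lock is reacquired after a sleep.
 */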
static void
lock_rm(struct lock_object *lock, uintptr_t how)
{
	struct rmlock *rm;
	struct rm_priotracker *tracker;

	rm = (struct rmlock *)lock;
	if (how == 0)
		rm_wlock(rm);
	else {
		tracker = (struct rm_priotracker *)how;
		rm_rlock(rm, tracker);
	}
}

static uintptr_t
unlock_rm(struct lock_object *lock)
{
	struct thread *td;
	struct pcpu *pc;
	struct rmlock *rm;
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	uintptr_t how;

	rm = (struct rmlock *)lock;
	tracker = NULL;
	how = 0;
	rm_assert(rm, RA_LOCKED | RA_NOTRECURSED);
	if (rm_wowned(rm))
		rm_wunlock(rm);
	else {
		/*
		 * Find the right rm_priotracker structure for curthread.
		 * Its uniqueness is guaranteed by the fact that we have
		 * already asserted that the lock is not recursively
		 * acquired.
		 */
		critical_enter();
		td = curthread;
		pc = get_pcpu();
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tracker = (struct rm_priotracker *)queue;
			if ((tracker->rmp_rmlock == rm) &&
			    (tracker->rmp_thread == td)) {
				how = (uintptr_t)tracker;
				break;
			}
		}
		KASSERT(tracker != NULL,
		    ("rm_priotracker is non-NULL when lock held in read mode"));
		critical_exit();
		rm_runlock(rm, tracker);
	}
	return (how);
}

#ifdef KDTRACE_HOOKS
static int
owner_rm(const struct lock_object *lock, struct thread **owner)
{
	const struct rmlock *rm;
	struct lock_class *lc;

	rm = (const struct rmlock *)lock;
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
#endif

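/*
 * The global rm_spinlock protects the rm_activeReaders list of every
 * rmlock in the system.
 */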
static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
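/*
 * Because only the owning CPU manipulates its queue, and does so with
 * preemption disabled, plain stores suffice.  The rmq_prev links exist
 * only to make removal O(1); as noted below, they are never relied
 * upon by the interrupt-time forward traversal.
 */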
static inline void
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next;

	/* Initialize all tracker pointers */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;

	/* rmq_prev is not used during forward traversal. */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element. */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}

/*
 * Return a count of the number of trackers the thread 'td' already
 * has on this CPU for the lock 'rm'.
 */
static int
rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
    const struct thread *td)
{
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	int count;

	count = 0;
	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
			count++;
	}
	return (count);
}

static inline void
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal. */
	next->rmq_prev = prev;

	/* Remove from list. */
	prev->rmq_next = next;
}

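/*
 * Executed on each CPU holding a read token during the write-side
 * rendezvous in _rm_wlock().  Any tracker for this lock that is not
 * already queued is flagged RMPF_ONQUEUE and linked onto
 * rm_activeReaders so the writer can wait for it to drain.
 */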
static void
rm_cleanIPI(void *arg)
{
	struct pcpu *pc;
	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;

	pc = get_pcpu();
	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
		}
	}
}

void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
	struct lock_class *lc;
	int liflags, xflags;

	liflags = 0;
	if (!(opts & RM_NOWITNESS))
		liflags |= LO_WITNESS;
	if (opts & RM_RECURSE)
		liflags |= LO_RECURSABLE;
	if (opts & RM_NEW)
		liflags |= LO_NEW;
	rm->rm_writecpus = all_cpus;
	LIST_INIT(&rm->rm_activeReaders);
	if (opts & RM_SLEEPABLE) {
		liflags |= LO_SLEEPABLE;
		lc = &lock_class_rm_sleepable;
		xflags = (opts & RM_NEW ? SX_NEW : 0);
		sx_init_flags(&rm->rm_lock_sx, "rmlock_sx",
		    xflags | SX_NOWITNESS);
	} else {
		lc = &lock_class_rm;
		xflags = (opts & RM_NEW ? MTX_NEW : 0);
		mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx",
		    xflags | MTX_NOWITNESS);
	}
	lock_init(&rm->lock_object, lc, name, NULL, liflags);
}

void
rm_init(struct rmlock *rm, const char *name)
{

	rm_init_flags(rm, name, 0);
}
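
/*
 * Illustrative usage sketch (hypothetical lock and data; see rmlock(9)
 * for the authoritative interface).  Readers pass a tracker, typically
 * allocated on the stack:
 *
 *	static struct rmlock foo_rm;
 *	rm_init(&foo_rm, "foo");
 *
 *	struct rm_priotracker tracker;
 *	rm_rlock(&foo_rm, &tracker);
 *	... access read-mostly data ...
 *	rm_runlock(&foo_rm, &tracker);
 *
 *	rm_wlock(&foo_rm);
 *	... modify the data ...
 *	rm_wunlock(&foo_rm);
 */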

void
rm_destroy(struct rmlock *rm)
{

	rm_assert(rm, RA_UNLOCKED);
	LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_destroy(&rm->rm_lock_sx);
	else
		mtx_destroy(&rm->rm_lock_mtx);
	lock_destroy(&rm->lock_object);
}

int
rm_wowned(const struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		return (sx_xlocked(&rm->rm_lock_sx));
	else
		return (mtx_owned(&rm->rm_lock_mtx));
}

void
rm_sysinit(void *arg)
{
	struct rm_args *args;

	args = arg;
	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_flags);
}

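/*
 * Read-lock slow path: entered when this CPU lacks a read token for
 * 'rm' or merely because a preemption is pending (in which case the
 * critical_enter()/critical_exit() pair lets the preemption occur).
 * Unless an intervening IPI already granted the lock or the
 * acquisition may recurse, the token is (re)acquired by briefly
 * taking and releasing the backing write lock, which serializes
 * against any writer.
 */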
static __noinline int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct pcpu *pc;

	critical_enter();
	pc = get_pcpu();

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
		critical_exit();
		return (1);
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/* Check to see if the IPI granted us the lock after all. */
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		if (rm_trackers_present(pc, rm, curthread) != 0) {
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_unlock_spin(&rm_spinlock);
			rm_tracker_add(pc, tracker);
			critical_exit();
			return (1);
		}
	}

	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			THREAD_SLEEPING_OK();
			sx_xlock(&rm->rm_lock_sx);
			THREAD_NO_SLEEPING();
		} else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = get_pcpu();
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}

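/*
 * Read-lock fast path.  The manual td_critnest++/-- bracket is an
 * inlined critical_enter()/critical_exit() that skips the owepreempt
 * processing; the __compiler_membar() calls keep the compiler from
 * moving the tracker manipulation outside the critical section.  If
 * this CPU holds a read token and no preemption is pending, the lock
 * is acquired without any atomic operation or shared-memory write.
 */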
int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct thread *td = curthread;
	struct pcpu *pc;

	if (SCHEDULER_STOPPED())
		return (1);

	tracker->rmp_flags = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_NO_SLEEPING();

	td->td_critnest++;	/* critical_enter(); */

	__compiler_membar();

	pc = cpuid_to_pcpu[td->td_oncpu];	/* pcpu_find(td->td_oncpu); */

	rm_tracker_add(pc, tracker);

	sched_pin();

	__compiler_membar();

	td->td_critnest--;

	/*
	 * Fast path to combine two common conditions into a single
	 * conditional jump.
	 */
	if (__predict_true(0 == (td->td_owepreempt |
	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus))))
		return (1);

	/* We do not have a read token and need to acquire one. */
	return (_rm_rlock_hard(rm, tracker, trylock));
}

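/*
 * Read-unlock slow path: pay back an owed preemption and, if a
 * writer's IPI queued this tracker on rm_activeReaders, dequeue it.
 * If we are the reader the writer is currently waiting for
 * (RMPF_SIGNAL), wake the writer sleeping on the lock's turnstile.
 */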
static __noinline void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}

void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	if (SCHEDULER_STOPPED())
		return;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu];	/* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;
	sched_unpin();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_SLEEPING_OK();

	if (__predict_true(0 == (td->td_owepreempt | tracker->rmp_flags)))
		return;

	_rm_unlock_hard(td, tracker);
}

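/*
 * Write lock: after taking the backing mutex/sx lock, reclaim the read
 * tokens by marking every CPU in rm_writecpus and running rm_cleanIPI()
 * on the CPUs that held a token.  Every reader registered that way is
 * then flagged RMPF_SIGNAL and waited for on the lock's turnstile, one
 * at a time, until rm_activeReaders drains.
 */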
void
_rm_wlock(struct rmlock *rm)
{
	struct rm_priotracker *prio;
	struct turnstile *ts;
	cpuset_t readcpus;

	if (SCHEDULER_STOPPED())
		return;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xlock(&rm->rm_lock_sx);
	else
		mtx_lock(&rm->rm_lock_mtx);

	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
		/* Get all read tokens back */
		readcpus = all_cpus;
		CPU_NAND(&readcpus, &rm->rm_writecpus);
		rm->rm_writecpus = all_cpus;

		/*
		 * Assumes rm->rm_writecpus update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
#ifdef SMP
		smp_rendezvous_cpus(readcpus,
		    smp_no_rendezvous_barrier,
		    rm_cleanIPI,
		    smp_no_rendezvous_barrier,
		    rm);

#else
		rm_cleanIPI(rm);
#endif

		mtx_lock_spin(&rm_spinlock);
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}
		mtx_unlock_spin(&rm_spinlock);
	}
}

void
_rm_wunlock(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);
}

#if LOCK_DEBUG > 0

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_UNLOCKED, file, line);

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_INC(curthread);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_wunlock(rm);
	TD_LOCKS_DEC(curthread);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return (1);

#ifdef INVARIANTS
	if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
		critical_enter();
		KASSERT(rm_trackers_present(get_pcpu(), rm,
		    curthread) == 0,
		    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
		    rm->lock_object.lo_name, file, line));
		critical_exit();
	}
#endif
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
	if (!trylock) {
		KASSERT(!rm_wowned(rm),
		    ("rm_rlock: wlock already held for %s @ %s:%d",
		    rm->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
		    NULL);
	}

	if (_rm_rlock(rm, tracker, trylock)) {
		if (trylock)
			LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
			    line);
		else
			LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
			    line);
		WITNESS_LOCK(&rm->lock_object, 0, file, line);
		TD_LOCKS_INC(curthread);
		return (1);
	} else if (trylock)
		LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

	return (0);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
	TD_LOCKS_DEC(curthread);
}

#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	return (_rm_rlock(rm, tracker, trylock));
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_runlock(rm, tracker);
}

#endif

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rm_assert
#endif

/*
 * Note that this does not need to use witness_assert() for read lock
 * assertions since an exact count of read locks held by this thread
 * is computable.
 */
void
_rm_assert(const struct rmlock *rm, int what, const char *file, int line)
{
	int count;

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
		/*
		 * Handle the write-locked case.  Unlike other
		 * primitives, writers can never recurse.
		 */
		if (rm_wowned(rm)) {
			if (what & RA_RLOCKED)
				panic("Lock %s exclusively locked @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			break;
		}

		critical_enter();
		count = rm_trackers_present(get_pcpu(), rm, curthread);
		critical_exit();

		if (count == 0)
			panic("Lock %s not %slocked @ %s:%d\n",
			    rm->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);
		if (count > 1) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_WLOCKED:
		if (!rm_wowned(rm))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
		if (rm_wowned(rm))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);

		critical_enter();
		count = rm_trackers_present(get_pcpu(), rm, curthread);
		critical_exit();

		if (count != 0)
			panic("Lock %s read locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
static void
print_tracker(struct rm_priotracker *tr)
{
	struct thread *td;

	td = tr->rmp_thread;
	db_printf("   thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid,
	    td->td_proc->p_pid, td->td_name);
	if (tr->rmp_flags & RMPF_ONQUEUE) {
		db_printf("ONQUEUE");
		if (tr->rmp_flags & RMPF_SIGNAL)
			db_printf(",SIGNAL");
	} else
		db_printf("0");
	db_printf("}\n");
}

static void
db_show_rm(const struct lock_object *lock)
{
	struct rm_priotracker *tr;
	struct rm_queue *queue;
	const struct rmlock *rm;
	struct lock_class *lc;
	struct pcpu *pc;

	rm = (const struct rmlock *)lock;
	db_printf(" writecpus: ");
	ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
	db_printf("\n");
	db_printf(" per-CPU readers:\n");
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tr = (struct rm_priotracker *)queue;
			if (tr->rmp_rmlock == rm)
				print_tracker(tr);
		}
	db_printf(" active readers:\n");
	LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
		print_tracker(tr);
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	db_printf("Backing write-lock (%s):\n", lc->lc_name);
	lc->lc_ddb_show(&rm->rm_wlock_object);
}
#endif