/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * A cookie to mark destroyed rmlocks.  This is stored in the head of
 * rm_activeReaders.
 */
#define	RM_DESTROYED	((void *)0xdead)

#define	rm_destroyed(rm)						\
	(LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)

#define	RMPF_ONQUEUE	1
#define	RMPF_SIGNAL	2

#ifndef INVARIANTS
#define	_rm_assert(c, what, file, line)
#endif

static void	assert_rm(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_rm(const struct lock_object *lock);
#endif
static void	lock_rm(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_rm(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rm(struct lock_object *lock);

struct lock_class lock_class_rm = {
	.lc_name = "rm",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

struct lock_class lock_class_rm_sleepable = {
	.lc_name = "sleepable rm",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

static void
assert_rm(const struct lock_object *lock, int what)
{

	rm_assert((const struct rmlock *)lock, what);
}

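/*
 * Lock class glue used through the generic lock_class interface (e.g. to
 * drop and later reacquire the lock around a sleep).  The 'how' cookie is
 * either 0 for a write lock or a pointer to the caller's rm_priotracker
 * for a read lock; unlock_rm() below produces the cookie that lock_rm()
 * consumes.
 */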
static void
lock_rm(struct lock_object *lock, uintptr_t how)
{
	struct rmlock *rm;
	struct rm_priotracker *tracker;

	rm = (struct rmlock *)lock;
	if (how == 0)
		rm_wlock(rm);
	else {
		tracker = (struct rm_priotracker *)how;
		rm_rlock(rm, tracker);
	}
}

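/*
 * Release the lock and return the 'how' cookie described above.  For a
 * read lock this means locating curthread's tracker on the per-CPU queue,
 * which is unique because the caller asserted the lock is not recursed.
 */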
static uintptr_t
unlock_rm(struct lock_object *lock)
{
	struct thread *td;
	struct pcpu *pc;
	struct rmlock *rm;
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	uintptr_t how;

	rm = (struct rmlock *)lock;
	tracker = NULL;
	how = 0;
	rm_assert(rm, RA_LOCKED | RA_NOTRECURSED);
	if (rm_wowned(rm))
		rm_wunlock(rm);
	else {
		/*
		 * Find the right rm_priotracker structure for curthread.
		 * The guarantee about its uniqueness is given by the fact
		 * that we already asserted the lock wasn't recursively
		 * acquired.
		 */
		critical_enter();
		td = curthread;
		pc = pcpu_find(curcpu);
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tracker = (struct rm_priotracker *)queue;
			if ((tracker->rmp_rmlock == rm) &&
			    (tracker->rmp_thread == td)) {
				how = (uintptr_t)tracker;
				break;
			}
		}
		KASSERT(tracker != NULL,
		    ("rm_priotracker is non-NULL when lock held in read mode"));
		critical_exit();
		rm_runlock(rm, tracker);
	}
	return (how);
}

#ifdef KDTRACE_HOOKS
static int
owner_rm(const struct lock_object *lock, struct thread **owner)
{
	const struct rmlock *rm;
	struct lock_class *lc;

	rm = (const struct rmlock *)lock;
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
#endif

static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
static void inline
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next;

	/* Initialize all tracker pointers */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;
	/* rmq_prev is not used during forward traversal. */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element. */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}

/*
 * Return a count of the number of trackers the thread 'td' already
 * has on this CPU for the lock 'rm'.
 */
static int
rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
    const struct thread *td)
{
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	int count;

	count = 0;
	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
			count++;
	}
	return (count);
}

static void inline
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal. */
	next->rmq_prev = prev;

	/* Remove from list. */
	prev->rmq_next = next;
}

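/*
 * Rendezvous/IPI handler run on each CPU that still holds a read token when
 * a writer arrives.  Any trackers for 'rm' on the local per-CPU queue are
 * flagged as RMPF_ONQUEUE and linked onto rm_activeReaders so the writer
 * can wait for those readers to drain.
 */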
static void
rm_cleanIPI(void *arg)
{
	struct pcpu *pc;
	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;
	pc = pcpu_find(curcpu);

	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
		}
	}
}

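/*
 * Initialize an rmlock.  The backing write lock is an sx lock for sleepable
 * rmlocks and a mutex otherwise; WITNESS tracks the rmlock itself, so the
 * backing lock is created with witness disabled.
 */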
void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
	struct lock_class *lc;
	int liflags, xflags;

	liflags = 0;
	if (!(opts & RM_NOWITNESS))
		liflags |= LO_WITNESS;
	if (opts & RM_RECURSE)
		liflags |= LO_RECURSABLE;
	if (opts & RM_NEW)
		liflags |= LO_NEW;
	rm->rm_writecpus = all_cpus;
	LIST_INIT(&rm->rm_activeReaders);
	if (opts & RM_SLEEPABLE) {
		liflags |= LO_SLEEPABLE;
		lc = &lock_class_rm_sleepable;
		xflags = (opts & RM_NEW ? SX_NEW : 0);
		sx_init_flags(&rm->rm_lock_sx, "rmlock_sx",
		    xflags | SX_NOWITNESS);
	} else {
		lc = &lock_class_rm;
		xflags = (opts & RM_NEW ? MTX_NEW : 0);
		mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx",
		    xflags | MTX_NOWITNESS);
	}
	lock_init(&rm->lock_object, lc, name, NULL, liflags);
}

void
rm_init(struct rmlock *rm, const char *name)
{

	rm_init_flags(rm, name, 0);
}

void
rm_destroy(struct rmlock *rm)
{

	rm_assert(rm, RA_UNLOCKED);
	LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_destroy(&rm->rm_lock_sx);
	else
		mtx_destroy(&rm->rm_lock_mtx);
	lock_destroy(&rm->lock_object);
}

int
rm_wowned(const struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		return (sx_xlocked(&rm->rm_lock_sx));
	else
		return (mtx_owned(&rm->rm_lock_mtx));
}

void
rm_sysinit(void *arg)
{
	struct rm_args *args;

	args = arg;
	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_flags);
}

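/*
 * Slow path for read acquisition, entered when this CPU does not hold a
 * read token for 'rm' (or a preemption is pending).  Unless the writer's
 * IPI already granted us the lock, or this is a recursive read of a
 * recursive rmlock, a token is obtained by briefly taking the backing
 * write lock.  Returns 0 only for a failed trylock attempt.
 */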
static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct pcpu *pc;

	critical_enter();
	pc = pcpu_find(curcpu);

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
		critical_exit();
		return (1);
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/*
	 * Check to see if the IPI granted us the lock after all.  The load of
	 * rmp_flags must happen after the tracker is removed from the list.
	 */
	__compiler_membar();
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		if (rm_trackers_present(pc, rm, curthread) != 0) {
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_unlock_spin(&rm_spinlock);
			rm_tracker_add(pc, tracker);
			critical_exit();
			return (1);
		}
	}

	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			THREAD_SLEEPING_OK();
			sx_xlock(&rm->rm_lock_sx);
			THREAD_NO_SLEEPING();
		} else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = pcpu_find(curcpu);
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}

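/*
 * Read lock fast path: publish the tracker on the per-CPU queue inside a
 * critical section and pin the thread to this CPU.  If the CPU already
 * holds a read token and no preemption is pending, the lock is acquired
 * without touching any shared state; otherwise fall back to
 * _rm_rlock_hard().
 */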
int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct thread *td = curthread;
	struct pcpu *pc;

	if (SCHEDULER_STOPPED())
		return (1);

	tracker->rmp_flags = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_NO_SLEEPING();

	td->td_critnest++;	/* critical_enter(); */

	__compiler_membar();

	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */

	rm_tracker_add(pc, tracker);

	sched_pin();

	__compiler_membar();

	td->td_critnest--;

	/*
	 * Fast path to combine two common conditions into a single
	 * conditional jump.
	 */
	if (0 == (td->td_owepreempt |
	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
		return (1);

	/* We do not have a read token and need to acquire one. */
	return _rm_rlock_hard(rm, tracker, trylock);
}

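/*
 * Slow path for read release: pay any deferred preemption and, if
 * rm_cleanIPI() placed this tracker on rm_activeReaders, remove it again,
 * waking the writer blocked on the turnstile when it asked to be signalled.
 */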
static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}

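/*
 * Read unlock fast path: unpin and pull the tracker off the per-CPU queue;
 * the slow path is only needed when a preemption is pending or a writer
 * has flagged this tracker.
 */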
void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	if (SCHEDULER_STOPPED())
		return;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;
	sched_unpin();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_SLEEPING_OK();

	if (0 == (td->td_owepreempt | tracker->rmp_flags))
		return;

	_rm_unlock_hard(td, tracker);
}

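/*
 * Write lock: take the backing lock, then revoke the read token from every
 * CPU that holds one (via an IPI rendezvous on SMP) and block on the
 * turnstile until all active readers have drained.
 */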
void
_rm_wlock(struct rmlock *rm)
{
	struct rm_priotracker *prio;
	struct turnstile *ts;
	cpuset_t readcpus;

	if (SCHEDULER_STOPPED())
		return;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xlock(&rm->rm_lock_sx);
	else
		mtx_lock(&rm->rm_lock_mtx);

	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
		/* Get all read tokens back */
		readcpus = all_cpus;
		CPU_NAND(&readcpus, &rm->rm_writecpus);
		rm->rm_writecpus = all_cpus;

		/*
		 * Assumes rm->rm_writecpus update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
#ifdef SMP
		smp_rendezvous_cpus(readcpus,
		    smp_no_rendezvous_barrier,
		    rm_cleanIPI,
		    smp_no_rendezvous_barrier,
		    rm);

#else
		rm_cleanIPI(rm);
#endif

		mtx_lock_spin(&rm_spinlock);
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}
		mtx_unlock_spin(&rm_spinlock);
	}
}

void
_rm_wunlock(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);
}

#if LOCK_DEBUG > 0

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_UNLOCKED, file, line);

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_INC(curthread);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_wunlock(rm);
	TD_LOCKS_DEC(curthread);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return (1);

#ifdef INVARIANTS
	if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
		critical_enter();
		KASSERT(rm_trackers_present(pcpu_find(curcpu), rm,
		    curthread) == 0,
		    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
		    rm->lock_object.lo_name, file, line));
		critical_exit();
	}
#endif
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
	if (!trylock) {
		KASSERT(!rm_wowned(rm),
		    ("rm_rlock: wlock already held for %s @ %s:%d",
		    rm->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
		    NULL);
	}

	if (_rm_rlock(rm, tracker, trylock)) {
		if (trylock)
			LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
			    line);
		else
			LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
			    line);
		WITNESS_LOCK(&rm->lock_object, 0, file, line);
		TD_LOCKS_INC(curthread);
		return (1);
	} else if (trylock)
		LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

	return (0);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
	TD_LOCKS_DEC(curthread);
}

#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	return _rm_rlock(rm, tracker, trylock);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_runlock(rm, tracker);
}

#endif

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rm_assert
#endif

/*
 * Note that this does not need to use witness_assert() for read lock
 * assertions since an exact count of read locks held by this thread
 * is computable.
 */
void
_rm_assert(const struct rmlock *rm, int what, const char *file, int line)
{
	int count;

	if (SCHEDULER_STOPPED())
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
		/*
		 * Handle the write-locked case.  Unlike other
		 * primitives, writers can never recurse.
		 */
		if (rm_wowned(rm)) {
			if (what & RA_RLOCKED)
				panic("Lock %s exclusively locked @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			break;
		}

		critical_enter();
		count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
		critical_exit();

		if (count == 0)
			panic("Lock %s not %slocked @ %s:%d\n",
			    rm->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);
		if (count > 1) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_WLOCKED:
		if (!rm_wowned(rm))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
		if (rm_wowned(rm))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);

		critical_enter();
		count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
		critical_exit();

		if (count != 0)
			panic("Lock %s read locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
static void
print_tracker(struct rm_priotracker *tr)
{
	struct thread *td;

	td = tr->rmp_thread;
	db_printf(" thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid,
	    td->td_proc->p_pid, td->td_name);
	if (tr->rmp_flags & RMPF_ONQUEUE) {
		db_printf("ONQUEUE");
		if (tr->rmp_flags & RMPF_SIGNAL)
			db_printf(",SIGNAL");
	} else
		db_printf("");
	db_printf("}\n");
}

static void
db_show_rm(const struct lock_object *lock)
{
	struct rm_priotracker *tr;
	struct rm_queue *queue;
	const struct rmlock *rm;
	struct lock_class *lc;
	struct pcpu *pc;

	rm = (const struct rmlock *)lock;
	db_printf(" writecpus: ");
	ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
	db_printf("\n");
	db_printf(" per-CPU readers:\n");
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tr = (struct rm_priotracker *)queue;
			if (tr->rmp_rmlock == rm)
				print_tracker(tr);
		}
	db_printf(" active readers:\n");
	LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
		print_tracker(tr);
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	db_printf("Backing write-lock (%s):\n", lc->lc_name);
	lc->lc_ddb_show(&rm->rm_wlock_object);
}
#endif