FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sx.c
/*-
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation attempts to ensure
 * deterministic lock granting behavior, so that slocks and xlocks are
 * interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */
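
/*
 * Example: a minimal sketch of typical sx(9) consumer usage.  The foo_*
 * identifiers are hypothetical; sx_init() is the common zero-flags
 * wrapper around sx_init_flags() below.
 */
#if 0
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx foo_lock;
static int foo_count;

static void
foo_setup(void)
{

	sx_init(&foo_lock, "foo lock");
}

static int
foo_read(void)
{
	int v;

	sx_slock(&foo_lock);	/* Shared: readers may run concurrently. */
	v = foo_count;
	sx_sunlock(&foo_lock);
	return (v);
}

static void
foo_write(int v)
{

	sx_xlock(&foo_lock);	/* Exclusive: writers are serialized. */
	foo_count = v;
	sx_xunlock(&foo_lock);
}
#endif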

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_sx.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.1/sys/kern/kern_sx.c 320241 2017-06-22 18:40:34Z markj $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#include <machine/cpu.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#define	ADAPTIVE_SX
#endif

CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/* Handy macros for sleep queues. */
#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
 * drop Giant anytime we have to sleep or if we adaptively spin.
 */
#define	GIANT_DECLARE							\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant)					\

#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_giantcnt++;					\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

#define	GIANT_RESTORE() do {						\
	if (_giantcnt > 0) {						\
		mtx_assert(&Giant, MA_NOTOWNED);			\
		while (_giantcnt--)					\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
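
/*
 * Example: the shape of use in the contested paths below.  Giant, if
 * held (possibly recursively), is fully released before spinning or
 * sleeping and is reacquired to the saved depth afterwards.
 * Illustrative sketch only:
 */
#if 0
	GIANT_DECLARE;

	GIANT_SAVE();		/* Drop Giant, recording its recursion depth. */
	/* ... adaptive spinning and/or sleepq_wait() ... */
	GIANT_RESTORE();	/* Reacquire Giant _giantcnt times. */
#endif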

/*
 * Returns true if an exclusive lock is recursed.  It assumes
 * curthread currently has an exclusive lock.
 */
#define	sx_recursed(sx)		((sx)->sx_recurse != 0)

static void	assert_sx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_sx(const struct lock_object *lock);
#endif
static void	lock_sx(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_sx(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_sx(struct lock_object *lock);

struct lock_class lock_class_sx = {
	.lc_name = "sx",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_sx,
#ifdef DDB
	.lc_ddb_show = db_show_sx,
#endif
	.lc_lock = lock_sx,
	.lc_unlock = unlock_sx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_sx,
#endif
};

#ifndef INVARIANTS
#define	_sx_assert(sx, what, file, line)
#endif

#ifdef ADAPTIVE_SX
static u_int asx_retries = 10;
static u_int asx_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");

static struct lock_delay_config __read_mostly sx_delay;

SYSCTL_INT(_debug_sx, OID_AUTO, delay_base, CTLFLAG_RW, &sx_delay.base,
    0, "");
SYSCTL_INT(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(sx_delay);
#endif

void
assert_sx(const struct lock_object *lock, int what)
{

	sx_assert((const struct sx *)lock, what);
}

void
lock_sx(struct lock_object *lock, uintptr_t how)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	if (how)
		sx_slock(sx);
	else
		sx_xlock(sx);
}

uintptr_t
unlock_sx(struct lock_object *lock)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
	if (sx_xlocked(sx)) {
		sx_xunlock(sx);
		return (0);
	} else {
		sx_sunlock(sx);
		return (1);
	}
}
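
/*
 * Example: the "how" value returned by unlock_sx() (1 for shared, 0 for
 * exclusive) round-trips through lock_sx().  This is how generic
 * consumers of struct lock_class, such as the condition variable code,
 * drop and reacquire a lock of unknown type around a sleep.
 * Illustrative sketch; "lock" stands for an arbitrary lock_object:
 */
#if 0
	struct lock_class *class = LOCK_CLASS(lock);
	uintptr_t how;

	how = class->lc_unlock(lock);	/* Release, remembering the mode. */
	/* ... sleep or perform a blocking operation ... */
	class->lc_lock(lock, how);	/* Reacquire in the same mode. */
#endif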

#ifdef KDTRACE_HOOKS
int
owner_sx(const struct lock_object *lock, struct thread **owner)
{
	const struct sx *sx = (const struct sx *)lock;
	uintptr_t x = sx->sx_lock;

	*owner = (struct thread *)SX_OWNER(x);
	return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
	    (*owner != NULL));
}
#endif

void
sx_sysinit(void *arg)
{
	struct sx_args *sargs = arg;

	sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
}
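
/*
 * Example: sx_sysinit() is normally reached via the SX_SYSINIT() macro
 * from sys/sx.h, which arranges for a statically declared lock to be
 * initialized during boot (hypothetical identifiers):
 */
#if 0
static struct sx foo_lock;
SX_SYSINIT(foo_lock_init, &foo_lock, "foo lock");
#endif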

void
sx_init_flags(struct sx *sx, const char *description, int opts)
{
	int flags;

	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
	    SX_NOPROFILE | SX_NOADAPTIVE | SX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
	    ("%s: sx_lock not aligned for %s: %p", __func__, description,
	    &sx->sx_lock));

	flags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (opts & SX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & SX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & SX_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & SX_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & SX_QUIET)
		flags |= LO_QUIET;
	if (opts & SX_NEW)
		flags |= LO_NEW;

	flags |= opts & SX_NOADAPTIVE;
	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
	sx->sx_lock = SX_LOCK_UNLOCKED;
	sx->sx_recurse = 0;
}
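
/*
 * Example: initializing a lock that its owner may acquire recursively
 * and that may be paired with another lock of the same name without
 * tripping WITNESS (hypothetical identifiers):
 */
#if 0
	sx_init_flags(&foo_lock, "foo lock", SX_RECURSE | SX_DUPOK);
#endif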

void
sx_destroy(struct sx *sx)
{

	KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
	KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
	sx->sx_lock = SX_LOCK_DESTROYED;
	lock_destroy(&sx->lock_object);
}

int
sx_try_slock_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));

	x = sx->sx_lock;
	for (;;) {
		KASSERT(x != SX_LOCK_DESTROYED,
		    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
		if (!(x & SX_LOCK_SHARED))
			break;
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) {
			LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
			WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_READER);
			TD_LOCKS_INC(curthread);
			return (1);
		}
	}

	LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
	return (0);
}
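
/*
 * Example: a typical use of the try operation, backing off instead of
 * sleeping when the lock is exclusively held (hypothetical
 * identifiers):
 */
#if 0
	if (sx_try_slock(&foo_lock) == 0)
		return (EWOULDBLOCK);	/* Contended; caller will retry. */
	/* ... read state protected by foo_lock ... */
	sx_sunlock(&foo_lock);
#endif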

int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
	uintptr_t tid, x;
	int error = 0;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	tid = (uintptr_t)curthread;
	x = SX_LOCK_UNLOCKED;
	if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
		error = _sx_xlock_hard(sx, x, tid, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    0, 0, file, line, LOCKSTAT_WRITER);
	if (!error) {
		LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
		    file, line);
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
		TD_LOCKS_INC(curthread);
	}

	return (error);
}
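
/*
 * Example: the opts argument is how sx_xlock_sig() requests an
 * interruptible sleep (SX_INTERRUPTIBLE); such callers must handle an
 * error return instead of assuming the lock was acquired (hypothetical
 * identifiers):
 */
#if 0
	int error;

	error = sx_xlock_sig(&foo_lock);
	if (error != 0)
		return (error);		/* Sleep interrupted by a signal. */
	/* ... modify state protected by foo_lock ... */
	sx_xunlock(&foo_lock);
#endif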

int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{
	struct thread *td;
	uintptr_t tid, x;
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

	rval = 1;
	recursed = false;
	x = SX_LOCK_UNLOCKED;
	for (;;) {
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
			break;
		if (x == SX_LOCK_UNLOCKED)
			continue;
		if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
			sx->sx_recurse++;
			atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
			break;
		}
		rval = 0;
		break;
	}

	LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}

	return (rval);
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
	    line);
#if LOCK_DEBUG > 0
	_sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
#else
	__sx_xunlock(sx, curthread, file, line);
#endif
	TD_LOCKS_DEC(curthread);
}
/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeded, 0 otherwise.
 */
int
sx_try_upgrade_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);

	/*
	 * Try to switch from one shared lock to an exclusive lock.  We need
	 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
	 * we will wake up the exclusive waiters when we drop the lock.
	 */
	x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
	success = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
	    (uintptr_t)curthread | x);
	LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
	if (success) {
		WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(sx__upgrade, sx);
	}
	return (success);
}
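
/*
 * Example: the common upgrade pattern.  Since the upgrade is
 * non-blocking and can fail, callers fall back to dropping the shared
 * lock, reacquiring exclusively, and revalidating (hypothetical
 * identifiers):
 */
#if 0
	sx_slock(&foo_lock);
	if (sx_try_upgrade(&foo_lock) == 0) {
		sx_sunlock(&foo_lock);
		sx_xlock(&foo_lock);
		/* State may have changed while the lock was dropped. */
	}
	/* ... exclusive work ... */
	sx_xunlock(&foo_lock);
#endif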

/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
sx_downgrade_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (sx_recursed(sx))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

	/*
	 * Try to switch from an exclusive lock with no shared waiters
	 * to one sharer with no shared waiters.  If there are
	 * exclusive waiters, we don't need to lock the sleep queue so
	 * long as we preserve the flag.  We do one quick try and if
	 * that fails we grab the sleepq lock to keep the flags from
	 * changing and do it the slow way.
	 *
	 * We have to lock the sleep queue if there are shared waiters
	 * so we can wake them up.
	 */
	x = sx->sx_lock;
	if (!(x & SX_LOCK_SHARED_WAITERS) &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
		LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
		return;
	}

	/*
	 * Lock the sleep queue so we can read the waiters bits
	 * without any races and wakeup any shared waiters.
	 */
	sleepq_lock(&sx->lock_object);

	/*
	 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
	 * shared lock.  If there are any shared waiters, wake them up.
	 */
	wakeup_swapper = 0;
	x = sx->sx_lock;
	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
	if (x & SX_LOCK_SHARED_WAITERS)
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_SHARED_QUEUE);
	sleepq_release(&sx->lock_object);

	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(sx__downgrade, sx);

	if (wakeup_swapper)
		kick_proc0();
}
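
/*
 * Example: downgrading lets a writer finish a read-only tail of its
 * work without continuing to exclude readers (hypothetical
 * identifiers):
 */
#if 0
	sx_xlock(&foo_lock);
	foo_count++;			/* Requires exclusive access. */
	sx_downgrade(&foo_lock);	/* Now held as a single sharer. */
	/* ... read-only work; other readers may enter ... */
	sx_sunlock(&foo_lock);
#endif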

/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t x, uintptr_t tid, int opts,
    const char *file, int line)
{
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif

	if (__predict_false(x == SX_LOCK_UNLOCKED))
		x = SX_READ_VALUE(sx);

	/* If we already hold an exclusive lock, then recurse. */
	if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
		    sx->lock_object.lo_name, file, line));
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
		return (0);
	}

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&sx->lock_object);
	state = x;
#endif
	for (;;) {
		if (x == SX_LOCK_UNLOCKED) {
			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
		    &waittime);
#ifdef ADAPTIVE_SX
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			if ((x & SX_LOCK_SHARED) == 0) {
				owner = lv_sx_owner(x);
				if (TD_IS_RUNNING(owner)) {
					if (LOCK_LOG_TEST(&sx->lock_object, 0))
						CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
						    __func__, sx, owner);
					KTR_STATE1(KTR_SCHED, "thread",
					    sched_tdname(curthread), "spinning",
					    "lockname:\"%s\"",
					    sx->lock_object.lo_name);
					GIANT_SAVE();
					do {
						lock_delay(&lda);
						x = SX_READ_VALUE(sx);
						owner = lv_sx_owner(x);
					} while (owner != NULL &&
					    TD_IS_RUNNING(owner));
					KTR_STATE0(KTR_SCHED, "thread",
					    sched_tdname(curthread), "running");
					continue;
				}
			} else if (SX_SHARERS(x) && spintries < asx_retries) {
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < asx_loops; i++) {
					if (LOCK_LOG_TEST(&sx->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, sx, spintries, i);
					x = sx->sx_lock;
					if ((x & SX_LOCK_SHARED) == 0 ||
					    SX_SHARERS(x) == 0)
						break;
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					lda.spin_cnt++;
#endif
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				x = SX_READ_VALUE(sx);
				if (i != asx_loops)
					continue;
			}
		}
#endif

		sleepq_lock(&sx->lock_object);
		x = SX_READ_VALUE(sx);

		/*
		 * If the lock was released while spinning on the
		 * sleep queue chain lock, try again.
		 */
		if (x == SX_LOCK_UNLOCKED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the sleep queue
		 * chain lock.  If so, drop the sleep queue lock and try
		 * again.
		 */
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
		}
#endif

		/*
		 * If an exclusive lock was released with both shared
		 * and exclusive waiters and a shared waiter hasn't
		 * woken up and acquired the lock yet, sx_lock will be
		 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
		 * If we see that value, try to acquire it once.  Note
		 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
		 * as there are other exclusive waiters still.  If we
		 * fail, restart the loop.
		 */
		if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (atomic_cmpset_acq_ptr(&sx->sx_lock,
			    SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
			    tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
				sleepq_release(&sx->lock_object);
				CTR2(KTR_LOCK, "%s: %p claimed by new writer",
				    __func__, sx);
				break;
			}
			sleepq_release(&sx->lock_object);
			x = SX_READ_VALUE(sx);
			continue;
		}
		/*
		 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we
		 * fail, then loop back and retry.
		 */
		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (!atomic_cmpset_ptr(&sx->sx_lock, x,
			    x | SX_LOCK_EXCLUSIVE_WAITERS)) {
				sleepq_release(&sx->lock_object);
				x = SX_READ_VALUE(sx);
				continue;
			}
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the exclusive
		 * lock and the exclusive waiters flag is set, we have
		 * to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		GIANT_SAVE();
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
		x = SX_READ_VALUE(sx);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
	if (!error)
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_WRITER);
	GIANT_RESTORE();
	return (error);
}

/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
{
	uintptr_t x;
	int queue, wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	MPASS(!(sx->sx_lock & SX_LOCK_SHARED));

	x = SX_READ_VALUE(sx);
	if (x & SX_LOCK_RECURSED) {
		/* The lock is recursed, unrecurse one level. */
		if ((--sx->sx_recurse) == 0)
			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER);
	if (x == tid &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
		return;

	MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
	    SX_LOCK_EXCLUSIVE_WAITERS));
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

	sleepq_lock(&sx->lock_object);
	x = SX_LOCK_UNLOCKED;
	/*
	 * The wake up algorithm here is quite simple and probably not
	 * ideal.  It gives precedence to shared waiters if they are
	 * present.  For this condition, we have to preserve the
	 * state of the exclusive waiters flag.
	 * If interruptible sleeps left the shared queue empty, avoid
	 * starvation of the threads sleeping on the exclusive queue by
	 * giving them precedence and clearing the shared waiters bit
	 * anyway.
	 */
	if ((sx->sx_lock & SX_LOCK_SHARED_WAITERS) != 0 &&
	    sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
		queue = SQ_SHARED_QUEUE;
		x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
	} else
		queue = SQ_EXCLUSIVE_QUEUE;

	/* Wake up all the waiters for the specific queue. */
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
	atomic_store_rel_ptr(&sx->sx_lock, x);
	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
	    queue);
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
}

static bool __always_inline
__sx_slock_try(struct sx *sx, uintptr_t *xp, const char *file, int line)
{

	/*
	 * If no other thread has an exclusive lock then try to bump up
	 * the count of sharers.  Since we have to preserve the state
	 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
	 * shared lock loop back and retry.
	 */
	while (*xp & SX_LOCK_SHARED) {
		MPASS(!(*xp & SX_LOCK_SHARED_WAITERS));
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp,
		    *xp + SX_ONE_SHARER)) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR4(KTR_LOCK, "%s: %p succeed %p -> %p",
				    __func__, sx, (void *)*xp,
				    (void *)(*xp + SX_ONE_SHARER));
			return (true);
		}
	}
	return (false);
}
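
/*
 * Example: the loop above depends on atomic_fcmpset_acq_ptr(9)
 * reloading *xp with the lock's current value on failure, so no
 * explicit re-read is needed per iteration.  An equivalent formulation
 * with the older cmpset primitive would be:
 */
#if 0
	for (;;) {
		x = SX_READ_VALUE(sx);		/* Explicit reload. */
		if (!(x & SX_LOCK_SHARED))
			return (false);
		if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER))
			return (true);
	}
#endif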

static int __noinline
_sx_slock_hard(struct sx *sx, int opts, const char *file, int line, uintptr_t x)
{
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&sx->lock_object);
	state = x;
#endif

	/*
	 * As with rwlocks, we don't make any attempt to try to block
	 * shared locks once there is an exclusive waiter.
	 */
	for (;;) {
		if (__sx_slock_try(sx, &x, file, line))
			break;
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif

#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
		    &waittime);

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = lv_sx_owner(x);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, sx, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				GIANT_SAVE();
				do {
					lock_delay(&lda);
					x = SX_READ_VALUE(sx);
					owner = lv_sx_owner(x);
				} while (owner != NULL && TD_IS_RUNNING(owner));
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		}
#endif

		/*
		 * Some other thread already has an exclusive lock, so
		 * start the process of blocking.
		 */
		sleepq_lock(&sx->lock_object);
		x = SX_READ_VALUE(sx);

		/*
		 * The lock could have been released while we spun.
		 * In this case loop back and retry.
		 */
		if (x & SX_LOCK_SHARED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				x = SX_READ_VALUE(sx);
				continue;
			}
		}
#endif

		/*
		 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
		 * fail to set it drop the sleep queue lock and loop
		 * back.
		 */
		if (!(x & SX_LOCK_SHARED_WAITERS)) {
			if (!atomic_cmpset_ptr(&sx->sx_lock, x,
			    x | SX_LOCK_SHARED_WAITERS)) {
				sleepq_release(&sx->lock_object);
				x = SX_READ_VALUE(sx);
				continue;
			}
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the shared lock,
		 * we have to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		GIANT_SAVE();
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
		x = SX_READ_VALUE(sx);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
	if (error == 0) {
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_READER);
	}
	GIANT_RESTORE();
	return (error);
}

int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{
	uintptr_t x;
	int error;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("sx_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);

	error = 0;
	x = SX_READ_VALUE(sx);
	if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__acquire) ||
	    !__sx_slock_try(sx, &x, file, line)))
		error = _sx_slock_hard(sx, opts, file, line, x);
	if (error == 0) {
		LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
		WITNESS_LOCK(&sx->lock_object, 0, file, line);
		TD_LOCKS_INC(curthread);
	}
	return (error);
}

static bool __always_inline
_sx_sunlock_try(struct sx *sx, uintptr_t *xp)
{

	for (;;) {
		/*
		 * We should never have shared waiters while at least one
		 * thread holds a shared lock.
		 */
		KASSERT(!(*xp & SX_LOCK_SHARED_WAITERS),
		    ("%s: waiting sharers", __func__));

		/*
		 * See if there is more than one shared lock held.  If
		 * so, just drop one and return.
		 */
		if (SX_SHARERS(*xp) > 1) {
			if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp,
			    *xp - SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, sx, (void *)*xp,
					    (void *)(*xp - SX_ONE_SHARER));
				return (true);
			}
			continue;
		}

		/*
		 * If there aren't any waiters for an exclusive lock,
		 * then try to drop it quickly.
		 */
		if (!(*xp & SX_LOCK_EXCLUSIVE_WAITERS)) {
			MPASS(*xp == SX_SHARERS_LOCK(1));
			*xp = SX_SHARERS_LOCK(1);
			if (atomic_fcmpset_rel_ptr(&sx->sx_lock,
			    xp, SX_LOCK_UNLOCKED)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, sx);
				return (true);
			}
			continue;
		}
		break;
	}
	return (false);
}

static void __noinline
_sx_sunlock_hard(struct sx *sx, uintptr_t x, const char *file, int line)
{
	int wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);

	for (;;) {
		if (_sx_sunlock_try(sx, &x))
			break;

		/*
		 * At this point, there should just be one sharer with
		 * exclusive waiters.
		 */
		MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));

		sleepq_lock(&sx->lock_object);

		/*
		 * Wake up semantic here is quite simple:
		 * Just wake up all the exclusive waiters.
		 * Note that the state of the lock could have changed,
		 * so if it fails loop back and retry.
		 */
		if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
		    SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
		    SX_LOCK_UNLOCKED)) {
			sleepq_release(&sx->lock_object);
			x = SX_READ_VALUE(sx);
			continue;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p waking up all threads on "
			    "exclusive queue", __func__, sx);
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_EXCLUSIVE_QUEUE);
		sleepq_release(&sx->lock_object);
		if (wakeup_swapper)
			kick_proc0();
		break;
	}
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{
	uintptr_t x;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);

	x = SX_READ_VALUE(sx);
	if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__release) ||
	    !_sx_sunlock_try(sx, &x)))
		_sx_sunlock_hard(sx, x, file, line);

	TD_LOCKS_DEC(curthread);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(const struct sx *sx, int what, const char *file, int line)
{
#ifndef WITNESS
	int slocked = 0;
#endif

	if (panicstr != NULL)
		return;
	switch (what) {
	case SA_SLOCKED:
	case SA_SLOCKED | SA_NOTRECURSED:
	case SA_SLOCKED | SA_RECURSED:
#ifndef WITNESS
		slocked = 1;
		/* FALLTHROUGH */
#endif
	case SA_LOCKED:
	case SA_LOCKED | SA_NOTRECURSED:
	case SA_LOCKED | SA_RECURSED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If some other thread has an exclusive lock or we
		 * have one and are asserting a shared lock, fail.
		 * Also, if no one has a lock at all, fail.
		 */
		if (sx->sx_lock == SX_LOCK_UNLOCKED ||
		    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
		    sx_xholder(sx) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    sx->lock_object.lo_name, slocked ? "share " : "",
			    file, line);

		if (!(sx->sx_lock & SX_LOCK_SHARED)) {
			if (sx_recursed(sx)) {
				if (what & SA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    sx->lock_object.lo_name, file,
					    line);
			} else if (what & SA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		}
#endif
		break;
	case SA_XLOCKED:
	case SA_XLOCKED | SA_NOTRECURSED:
	case SA_XLOCKED | SA_RECURSED:
		if (sx_xholder(sx) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		if (sx_recursed(sx)) {
			if (what & SA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		} else if (what & SA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		break;
	case SA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If we hold an exclusive lock, fail.  We can't
		 * reliably check to see if we hold a shared lock or
		 * not.
		 */
		if (sx_xholder(sx) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */
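
/*
 * Example: subsystems use sx_assert() to document and enforce their
 * locking contracts at function entry (hypothetical identifiers):
 */
#if 0
static void
foo_modify_locked(void)
{

	sx_assert(&foo_lock, SA_XLOCKED);
	/* ... modify state that requires the exclusive lock ... */
}
#endif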

#ifdef DDB
static void
db_show_sx(const struct lock_object *lock)
{
	struct thread *td;
	const struct sx *sx;

	sx = (const struct sx *)lock;

	db_printf(" state: ");
	if (sx->sx_lock == SX_LOCK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (sx->sx_lock == SX_LOCK_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
	else {
		td = sx_xholder(sx);
		db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (sx_recursed(sx))
			db_printf(" recursed: %d\n", sx->sx_recurse);
	}

	db_printf(" waiters: ");
	switch(sx->sx_lock &
	    (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
	case SX_LOCK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive and shared\n");
		break;
	default:
		db_printf("none\n");
	}
}

/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
	struct sx *sx;

	/*
	 * Check to see if this thread is blocked on an sx lock.
	 * First, we check the lock class.  If that is ok, then we
	 * compare the lock name against the wait message.
	 */
	sx = td->td_wchan;
	if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
	    sx->lock_object.lo_name != td->td_wmesg)
		return (0);

	/* We think we have an sx lock, so output some details. */
	db_printf("blocked on sx \"%s\" ", td->td_wmesg);
	*ownerp = sx_xholder(sx);
	if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK (count %ju)\n",
		    (uintmax_t)SX_SHARERS(sx->sx_lock));
	else
		db_printf("XLOCK\n");
	return (1);
}
#endif