FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sx.c
1 /*-
2 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
3 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice(s), this list of conditions and the following disclaimer as
11 * the first lines of this file unmodified other than the possible
12 * addition of one or more copyright notices.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice(s), this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
18 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
21 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
24 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
27 * DAMAGE.
28 */
29
30 /*
31 * Shared/exclusive locks. This implementation attempts to ensure
32 * deterministic lock granting behavior, so that slocks and xlocks are
33 * interleaved.
34 *
35 * Priority propagation will not generally raise the priority of lock holders,
36 * so should not be relied upon in combination with sx locks.
37 */
38
39 #include "opt_ddb.h"
40 #include "opt_kdtrace.h"
41 #include "opt_no_adaptive_sx.h"
42
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD: releng/8.1/sys/kern/kern_sx.c 209002 2010-06-10 20:04:44Z jhb $");
45
46 #include <sys/param.h>
47 #include <sys/ktr.h>
48 #include <sys/linker_set.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/proc.h>
52 #include <sys/sleepqueue.h>
53 #include <sys/sx.h>
54 #include <sys/sysctl.h>
55 #include <sys/systm.h>
56
57 #if defined(SMP) && !defined(NO_ADAPTIVE_SX)
58 #include <machine/cpu.h>
59 #endif
60
61 #ifdef DDB
62 #include <ddb/ddb.h>
63 #endif
64
65 #if defined(SMP) && !defined(NO_ADAPTIVE_SX)
66 #define ADAPTIVE_SX
67 #endif
68
69 CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);
70
71 /* Handy macros for sleep queues. */
72 #define SQ_EXCLUSIVE_QUEUE 0
73 #define SQ_SHARED_QUEUE 1
74
75 #ifdef ADAPTIVE_SX
76 #define ASX_RETRIES 10
77 #define ASX_LOOPS 10000
78 #endif
79
80 /*
81 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file. We
82 * drop Giant anytime we have to sleep or if we adaptively spin.
83 */
84 #define GIANT_DECLARE \
85 int _giantcnt = 0; \
86 WITNESS_SAVE_DECL(Giant) \
87
88 #define GIANT_SAVE() do { \
89 if (mtx_owned(&Giant)) { \
90 WITNESS_SAVE(&Giant.lock_object, Giant); \
91 while (mtx_owned(&Giant)) { \
92 _giantcnt++; \
93 mtx_unlock(&Giant); \
94 } \
95 } \
96 } while (0)
97
98 #define GIANT_RESTORE() do { \
99 if (_giantcnt > 0) { \
100 mtx_assert(&Giant, MA_NOTOWNED); \
101 while (_giantcnt--) \
102 mtx_lock(&Giant); \
103 WITNESS_RESTORE(&Giant.lock_object, Giant); \
104 } \
105 } while (0)
106
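/*
 * Illustrative sketch (not part of kern_sx.c): how the three Giant macros
 * above are meant to pair up in a lock-acquisition slow path.  Giant is
 * dropped (however many times it is recursed) before sleeping or spinning
 * and reacquired the same number of times afterwards.  sleep_for_lock()
 * is a hypothetical placeholder for the sleepqueue calls used later in
 * this file.
 */
static int
example_slow_path(struct sx *sx)
{
	GIANT_DECLARE;			/* _giantcnt plus a WITNESS save slot */
	int error = 0;

	while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
	    (uintptr_t)curthread)) {
		GIANT_SAVE();		/* drop Giant before blocking */
		error = sleep_for_lock(sx);	/* hypothetical */
		if (error)
			break;
	}
	GIANT_RESTORE();		/* reacquire Giant as many times as dropped */
	return (error);
}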
107 /*
108 * Returns true if an exclusive lock is recursed. It assumes
109 * curthread currently has an exclusive lock.
110 */
111 #define sx_recurse lock_object.lo_data
112 #define sx_recursed(sx) ((sx)->sx_recurse != 0)
113
114 static void assert_sx(struct lock_object *lock, int what);
115 #ifdef DDB
116 static void db_show_sx(struct lock_object *lock);
117 #endif
118 static void lock_sx(struct lock_object *lock, int how);
119 #ifdef KDTRACE_HOOKS
120 static int owner_sx(struct lock_object *lock, struct thread **owner);
121 #endif
122 static int unlock_sx(struct lock_object *lock);
123
124 struct lock_class lock_class_sx = {
125 .lc_name = "sx",
126 .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
127 .lc_assert = assert_sx,
128 #ifdef DDB
129 .lc_ddb_show = db_show_sx,
130 #endif
131 .lc_lock = lock_sx,
132 .lc_unlock = unlock_sx,
133 #ifdef KDTRACE_HOOKS
134 .lc_owner = owner_sx,
135 #endif
136 };
137
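/*
 * Illustrative sketch (not part of kern_sx.c): the lc_lock/lc_unlock
 * methods registered above let type-agnostic code, such as the sleep(9)
 * machinery, drop and later reacquire an interlock without knowing
 * whether it is an sx lock, an rwlock or a mutex.
 * temporarily_without_lock() is a hypothetical caller.
 */
static void
temporarily_without_lock(struct lock_object *lo)
{
	struct lock_class *class;
	int how;

	class = LOCK_CLASS(lo);
	how = class->lc_unlock(lo);	/* remembers shared vs. exclusive */
	/* ... do work that must not hold the lock ... */
	class->lc_lock(lo, how);	/* reacquire in the original mode */
}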
138 #ifndef INVARIANTS
139 #define _sx_assert(sx, what, file, line)
140 #endif
141
142 void
143 assert_sx(struct lock_object *lock, int what)
144 {
145
146 sx_assert((struct sx *)lock, what);
147 }
148
149 void
150 lock_sx(struct lock_object *lock, int how)
151 {
152 struct sx *sx;
153
154 sx = (struct sx *)lock;
155 if (how)
156 sx_xlock(sx);
157 else
158 sx_slock(sx);
159 }
160
161 int
162 unlock_sx(struct lock_object *lock)
163 {
164 struct sx *sx;
165
166 sx = (struct sx *)lock;
167 sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
168 if (sx_xlocked(sx)) {
169 sx_xunlock(sx);
170 return (1);
171 } else {
172 sx_sunlock(sx);
173 return (0);
174 }
175 }
176
177 #ifdef KDTRACE_HOOKS
178 int
179 owner_sx(struct lock_object *lock, struct thread **owner)
180 {
181 struct sx *sx = (struct sx *)lock;
182 uintptr_t x = sx->sx_lock;
183
184 *owner = (struct thread *)SX_OWNER(x);
185 return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
186 (*owner != NULL));
187 }
188 #endif
189
190 void
191 sx_sysinit(void *arg)
192 {
193 struct sx_args *sargs = arg;
194
195 sx_init(sargs->sa_sx, sargs->sa_desc);
196 }
197
198 void
199 sx_init_flags(struct sx *sx, const char *description, int opts)
200 {
201 int flags;
202
203 MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
204 SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
205 ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
206 ("%s: sx_lock not aligned for %s: %p", __func__, description,
207 &sx->sx_lock));
208
209 flags = LO_SLEEPABLE | LO_UPGRADABLE;
210 if (opts & SX_DUPOK)
211 flags |= LO_DUPOK;
212 if (opts & SX_NOPROFILE)
213 flags |= LO_NOPROFILE;
214 if (!(opts & SX_NOWITNESS))
215 flags |= LO_WITNESS;
216 if (opts & SX_RECURSE)
217 flags |= LO_RECURSABLE;
218 if (opts & SX_QUIET)
219 flags |= LO_QUIET;
220
221 flags |= opts & SX_NOADAPTIVE;
222 sx->sx_lock = SX_LOCK_UNLOCKED;
223 sx->sx_recurse = 0;
224 lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
225 }
226
227 void
228 sx_destroy(struct sx *sx)
229 {
230
231 KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
232 KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
233 sx->sx_lock = SX_LOCK_DESTROYED;
234 lock_destroy(&sx->lock_object);
235 }
236
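/*
 * Illustrative sketch (not part of kern_sx.c): a typical lifecycle for an
 * sx lock protecting some hypothetical data.  SX_RECURSE is only needed
 * if the same thread may take the exclusive lock again while already
 * holding it; the example_* functions are hypothetical.
 */
static struct sx example_lock;

static void
example_setup(void)
{
	sx_init_flags(&example_lock, "example lock", SX_RECURSE);
}

static void
example_read(void)
{
	sx_slock(&example_lock);
	/* ... read the protected data ... */
	sx_sunlock(&example_lock);
}

static void
example_write(void)
{
	sx_xlock(&example_lock);
	/* ... modify the protected data ... */
	sx_xunlock(&example_lock);
}

static void
example_teardown(void)
{
	sx_destroy(&example_lock);
}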
237 int
238 _sx_slock(struct sx *sx, int opts, const char *file, int line)
239 {
240 int error = 0;
241
242 MPASS(curthread != NULL);
243 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
244 ("sx_slock() of destroyed sx @ %s:%d", file, line));
245 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
246 error = __sx_slock(sx, opts, file, line);
247 if (!error) {
248 LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
249 WITNESS_LOCK(&sx->lock_object, 0, file, line);
250 curthread->td_locks++;
251 }
252
253 return (error);
254 }
255
256 int
257 _sx_try_slock(struct sx *sx, const char *file, int line)
258 {
259 uintptr_t x;
260
261 for (;;) {
262 x = sx->sx_lock;
263 KASSERT(x != SX_LOCK_DESTROYED,
264 ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
265 if (!(x & SX_LOCK_SHARED))
266 break;
267 if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
268 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
269 WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
270 curthread->td_locks++;
271 return (1);
272 }
273 }
274
275 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
276 return (0);
277 }
278
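/*
 * Illustrative note (not part of kern_sx.c): the "x + SX_ONE_SHARER"
 * arithmetic above works because the sharer count lives in the upper bits
 * of the lock word.  The values below are the editor's reading of
 * sys/sx.h and should be checked against that header:
 *
 *	SX_LOCK_SHARED			0x01	lock is in shared mode
 *	SX_LOCK_SHARED_WAITERS		0x02
 *	SX_LOCK_EXCLUSIVE_WAITERS	0x04
 *	SX_LOCK_RECURSED		0x08
 *	SX_SHARERS_SHIFT		4
 *	SX_ONE_SHARER			(1 << SX_SHARERS_SHIFT)
 *
 * Under that encoding an unlocked lock is SX_SHARERS_LOCK(0) == 0x01, one
 * sharer is 0x11, two sharers 0x21, and so on; adding SX_ONE_SHARER bumps
 * the count without disturbing the flag bits.  In exclusive mode the word
 * instead holds the owning thread pointer plus the low flag bits.
 */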
279 int
280 _sx_xlock(struct sx *sx, int opts, const char *file, int line)
281 {
282 int error = 0;
283
284 MPASS(curthread != NULL);
285 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
286 ("sx_xlock() of destroyed sx @ %s:%d", file, line));
287 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
288 line, NULL);
289 error = __sx_xlock(sx, curthread, opts, file, line);
290 if (!error) {
291 LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
292 file, line);
293 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
294 curthread->td_locks++;
295 }
296
297 return (error);
298 }
299
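/*
 * Illustrative sketch (not part of kern_sx.c): an interruptible
 * acquisition.  The sx_xlock_sig() macro in sys/sx.h is expected to pass
 * SX_INTERRUPTIBLE down to _sx_xlock(), so the sleep in the hard path can
 * be broken by a signal and the resulting error is returned to the caller.
 */
static int
example_interruptible(struct sx *lock)
{
	int error;

	error = sx_xlock_sig(lock);
	if (error != 0)
		return (error);		/* interrupted by a signal */
	/* ... critical section ... */
	sx_xunlock(lock);
	return (0);
}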
300 int
301 _sx_try_xlock(struct sx *sx, const char *file, int line)
302 {
303 int rval;
304
305 MPASS(curthread != NULL);
306 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
307 ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
308
309 if (sx_xlocked(sx) &&
310 (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
311 sx->sx_recurse++;
312 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
313 rval = 1;
314 } else
315 rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
316 (uintptr_t)curthread);
317 LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
318 if (rval) {
319 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
320 file, line);
321 curthread->td_locks++;
322 }
323
324 return (rval);
325 }
326
327 void
328 _sx_sunlock(struct sx *sx, const char *file, int line)
329 {
330
331 MPASS(curthread != NULL);
332 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
333 ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
334 _sx_assert(sx, SA_SLOCKED, file, line);
335 curthread->td_locks--;
336 WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
337 LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
338 __sx_sunlock(sx, file, line);
339 LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_SUNLOCK_RELEASE, sx);
340 }
341
342 void
343 _sx_xunlock(struct sx *sx, const char *file, int line)
344 {
345
346 MPASS(curthread != NULL);
347 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
348 ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
349 _sx_assert(sx, SA_XLOCKED, file, line);
350 curthread->td_locks--;
351 WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
352 LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
353 line);
354 if (!sx_recursed(sx))
355 LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_XUNLOCK_RELEASE, sx);
356 __sx_xunlock(sx, curthread, file, line);
357 }
358
359 /*
360 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
361 * This will only succeed if this thread holds a single shared lock.
362 * Return 1 if the upgrade succeeded, 0 otherwise.
363 */
364 int
365 _sx_try_upgrade(struct sx *sx, const char *file, int line)
366 {
367 uintptr_t x;
368 int success;
369
370 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
371 ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
372 _sx_assert(sx, SA_SLOCKED, file, line);
373
374 /*
375 * Try to switch from one shared lock to an exclusive lock. We need
376 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
377 * we will wake up the exclusive waiters when we drop the lock.
378 */
379 x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
380 success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
381 (uintptr_t)curthread | x);
382 LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
383 if (success) {
384 WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
385 file, line);
386 LOCKSTAT_RECORD0(LS_SX_TRYUPGRADE_UPGRADE, sx);
387 }
388 return (success);
389 }
390
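/*
 * Illustrative sketch (not part of kern_sx.c): the usual pattern for
 * sx_try_upgrade() consumers -- read under a shared lock and only fall
 * back to dropping and re-taking the lock exclusively (revalidating the
 * condition afterwards) when the cheap upgrade fails.  needs_update()
 * and do_update() are hypothetical.
 */
static void
example_read_then_maybe_write(struct sx *lock)
{
	sx_slock(lock);
	if (!needs_update()) {
		sx_sunlock(lock);
		return;
	}
	if (!sx_try_upgrade(lock)) {
		/* Upgrade failed: drop, relock exclusively, revalidate. */
		sx_sunlock(lock);
		sx_xlock(lock);
		if (!needs_update()) {
			sx_xunlock(lock);
			return;
		}
	}
	do_update();
	sx_xunlock(lock);
}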
391 /*
392 * Downgrade an unrecursed exclusive lock into a single shared lock.
393 */
394 void
395 _sx_downgrade(struct sx *sx, const char *file, int line)
396 {
397 uintptr_t x;
398 int wakeup_swapper;
399
400 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
401 ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
402 _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
403 #ifndef INVARIANTS
404 if (sx_recursed(sx))
405 panic("downgrade of a recursed lock");
406 #endif
407
408 WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);
409
410 /*
411 * Try to switch from an exclusive lock with no shared waiters
412 * to one sharer with no shared waiters. If there are
413 * exclusive waiters, we don't need to lock the sleep queue so
414 * long as we preserve the flag. We do one quick try and if
415 * that fails we grab the sleepq lock to keep the flags from
416 * changing and do it the slow way.
417 *
418 * We have to lock the sleep queue if there are shared waiters
419 * so we can wake them up.
420 */
421 x = sx->sx_lock;
422 if (!(x & SX_LOCK_SHARED_WAITERS) &&
423 atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
424 (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
425 LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
426 return;
427 }
428
429 /*
430 * Lock the sleep queue so we can read the waiters bits
431 * without any races and wakeup any shared waiters.
432 */
433 sleepq_lock(&sx->lock_object);
434
435 /*
436 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
437 * shared lock. If there are any shared waiters, wake them up.
438 */
439 wakeup_swapper = 0;
440 x = sx->sx_lock;
441 atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
442 (x & SX_LOCK_EXCLUSIVE_WAITERS));
443 if (x & SX_LOCK_SHARED_WAITERS)
444 wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
445 0, SQ_SHARED_QUEUE);
446 sleepq_release(&sx->lock_object);
447
448 LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
449 LOCKSTAT_RECORD0(LS_SX_DOWNGRADE_DOWNGRADE, sx);
450
451 if (wakeup_swapper)
452 kick_proc0();
453 }
454
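/*
 * Illustrative sketch (not part of kern_sx.c): the common use of
 * sx_downgrade() -- build or update a structure under the exclusive lock,
 * then keep reading it under a shared hold so other readers may proceed.
 * populate() and consume() are hypothetical.
 */
static void
example_populate_then_read(struct sx *lock)
{
	sx_xlock(lock);
	populate();		/* writes require the exclusive lock */
	sx_downgrade(lock);	/* keep a shared hold, admit other readers */
	consume();		/* reads only need the shared lock */
	sx_sunlock(lock);
}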
455 /*
456 * This function represents the so-called 'hard case' for sx_xlock
457 * operation. All 'easy case' failures are redirected to this. Note
458 * that ideally this would be a static function, but it needs to be
459 * accessible from at least sx.h.
460 */
461 int
462 _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
463 int line)
464 {
465 GIANT_DECLARE;
466 #ifdef ADAPTIVE_SX
467 volatile struct thread *owner;
468 u_int i, spintries = 0;
469 #endif
470 uintptr_t x;
471 #ifdef LOCK_PROFILING
472 uint64_t waittime = 0;
473 int contested = 0;
474 #endif
475 int error = 0;
476 #ifdef KDTRACE_HOOKS
477 uint64_t spin_cnt = 0;
478 uint64_t sleep_cnt = 0;
479 int64_t sleep_time = 0;
480 #endif
481
482 /* If we already hold an exclusive lock, then recurse. */
483 if (sx_xlocked(sx)) {
484 KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
485 ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
486 sx->lock_object.lo_name, file, line));
487 sx->sx_recurse++;
488 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
489 if (LOCK_LOG_TEST(&sx->lock_object, 0))
490 CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
491 return (0);
492 }
493
494 if (LOCK_LOG_TEST(&sx->lock_object, 0))
495 CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
496 sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
497
498 while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
499 #ifdef KDTRACE_HOOKS
500 spin_cnt++;
501 #endif
502 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
503 &waittime);
504 #ifdef ADAPTIVE_SX
505 /*
506 * If the lock is write locked and the owner is
507 * running on another CPU, spin until the owner stops
508 * running or the state of the lock changes.
509 */
510 x = sx->sx_lock;
511 if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
512 if ((x & SX_LOCK_SHARED) == 0) {
513 x = SX_OWNER(x);
514 owner = (struct thread *)x;
515 if (TD_IS_RUNNING(owner)) {
516 if (LOCK_LOG_TEST(&sx->lock_object, 0))
517 CTR3(KTR_LOCK,
518 "%s: spinning on %p held by %p",
519 __func__, sx, owner);
520 GIANT_SAVE();
521 while (SX_OWNER(sx->sx_lock) == x &&
522 TD_IS_RUNNING(owner)) {
523 cpu_spinwait();
524 #ifdef KDTRACE_HOOKS
525 spin_cnt++;
526 #endif
527 }
528 continue;
529 }
530 } else if (SX_SHARERS(x) && spintries < ASX_RETRIES) {
531 GIANT_SAVE();
532 spintries++;
533 for (i = 0; i < ASX_LOOPS; i++) {
534 if (LOCK_LOG_TEST(&sx->lock_object, 0))
535 CTR4(KTR_LOCK,
536 "%s: shared spinning on %p with %u and %u",
537 __func__, sx, spintries, i);
538 x = sx->sx_lock;
539 if ((x & SX_LOCK_SHARED) == 0 ||
540 SX_SHARERS(x) == 0)
541 break;
542 cpu_spinwait();
543 #ifdef KDTRACE_HOOKS
544 spin_cnt++;
545 #endif
546 }
547 if (i != ASX_LOOPS)
548 continue;
549 }
550 }
551 #endif
552
553 sleepq_lock(&sx->lock_object);
554 x = sx->sx_lock;
555
556 /*
557 * If the lock was released while spinning on the
558 * sleep queue chain lock, try again.
559 */
560 if (x == SX_LOCK_UNLOCKED) {
561 sleepq_release(&sx->lock_object);
562 continue;
563 }
564
565 #ifdef ADAPTIVE_SX
566 /*
567 * The current lock owner might have started executing
568 * on another CPU (or the lock could have changed
569 * owners) while we were waiting on the sleep queue
570 * chain lock. If so, drop the sleep queue lock and try
571 * again.
572 */
573 if (!(x & SX_LOCK_SHARED) &&
574 (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
575 owner = (struct thread *)SX_OWNER(x);
576 if (TD_IS_RUNNING(owner)) {
577 sleepq_release(&sx->lock_object);
578 continue;
579 }
580 }
581 #endif
582
583 /*
584 * If an exclusive lock was released with both shared
585 * and exclusive waiters and a shared waiter hasn't
586 * woken up and acquired the lock yet, sx_lock will be
587 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
588 * If we see that value, try to acquire it once. Note
589 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
590 * as there are other exclusive waiters still. If we
591 * fail, restart the loop.
592 */
593 if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
594 if (atomic_cmpset_acq_ptr(&sx->sx_lock,
595 SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
596 tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
597 sleepq_release(&sx->lock_object);
598 CTR2(KTR_LOCK, "%s: %p claimed by new writer",
599 __func__, sx);
600 break;
601 }
602 sleepq_release(&sx->lock_object);
603 continue;
604 }
605
606 /*
607 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag. If we fail,
608 * then loop back and retry.
609 */
610 if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
611 if (!atomic_cmpset_ptr(&sx->sx_lock, x,
612 x | SX_LOCK_EXCLUSIVE_WAITERS)) {
613 sleepq_release(&sx->lock_object);
614 continue;
615 }
616 if (LOCK_LOG_TEST(&sx->lock_object, 0))
617 CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
618 __func__, sx);
619 }
620
621 /*
622 * Since we have been unable to acquire the exclusive
623 * lock and the exclusive waiters flag is set, we have
624 * to sleep.
625 */
626 if (LOCK_LOG_TEST(&sx->lock_object, 0))
627 CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
628 __func__, sx);
629
630 #ifdef KDTRACE_HOOKS
631 sleep_time -= lockstat_nsecs();
632 #endif
633 GIANT_SAVE();
634 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
635 SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
636 SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
637 if (!(opts & SX_INTERRUPTIBLE))
638 sleepq_wait(&sx->lock_object, 0);
639 else
640 error = sleepq_wait_sig(&sx->lock_object, 0);
641 #ifdef KDTRACE_HOOKS
642 sleep_time += lockstat_nsecs();
643 sleep_cnt++;
644 #endif
645 if (error) {
646 if (LOCK_LOG_TEST(&sx->lock_object, 0))
647 CTR2(KTR_LOCK,
648 "%s: interruptible sleep by %p suspended by signal",
649 __func__, sx);
650 break;
651 }
652 if (LOCK_LOG_TEST(&sx->lock_object, 0))
653 CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
654 __func__, sx);
655 }
656
657 GIANT_RESTORE();
658 if (!error)
659 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE, sx,
660 contested, waittime, file, line);
661 #ifdef KDTRACE_HOOKS
662 if (sleep_time)
663 LOCKSTAT_RECORD1(LS_SX_XLOCK_BLOCK, sx, sleep_time);
664 if (spin_cnt > sleep_cnt)
665 LOCKSTAT_RECORD1(LS_SX_XLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
666 #endif
667 return (error);
668 }
669
670 /*
671 * This function represents the so-called 'hard case' for sx_xunlock
672 * operation. All 'easy case' failures are redirected to this. Note
673 * that ideally this would be a static function, but it needs to be
674 * accessible from at least sx.h.
675 */
676 void
677 _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
678 {
679 uintptr_t x;
680 int queue, wakeup_swapper;
681
682 MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
683
684 /* If the lock is recursed, then unrecurse one level. */
685 if (sx_xlocked(sx) && sx_recursed(sx)) {
686 if ((--sx->sx_recurse) == 0)
687 atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
688 if (LOCK_LOG_TEST(&sx->lock_object, 0))
689 CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
690 return;
691 }
692 MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
693 SX_LOCK_EXCLUSIVE_WAITERS));
694 if (LOCK_LOG_TEST(&sx->lock_object, 0))
695 CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);
696
697 sleepq_lock(&sx->lock_object);
698 x = SX_LOCK_UNLOCKED;
699
700 /*
701 * The wake up algorithm here is quite simple and probably not
702 * ideal. It gives precedence to shared waiters if they are
703 * present. For this condition, we have to preserve the
704 * state of the exclusive waiters flag.
705 * If interruptible sleeps left the shared queue empty, avoid
706 * starving the threads sleeping on the exclusive queue by giving
707 * them precedence and clearing the shared waiters bit anyway.
708 */
709 if ((sx->sx_lock & SX_LOCK_SHARED_WAITERS) != 0 &&
710 sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
711 queue = SQ_SHARED_QUEUE;
712 x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
713 } else
714 queue = SQ_EXCLUSIVE_QUEUE;
715
716 /* Wake up all the waiters for the specific queue. */
717 if (LOCK_LOG_TEST(&sx->lock_object, 0))
718 CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
719 __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
720 "exclusive");
721 atomic_store_rel_ptr(&sx->sx_lock, x);
722 wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
723 queue);
724 sleepq_release(&sx->lock_object);
725 if (wakeup_swapper)
726 kick_proc0();
727 }
728
729 /*
730 * This function represents the so-called 'hard case' for sx_slock
731 * operation. All 'easy case' failures are redirected to this. Note
732 * that ideally this would be a static function, but it needs to be
733 * accessible from at least sx.h.
734 */
735 int
736 _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
737 {
738 GIANT_DECLARE;
739 #ifdef ADAPTIVE_SX
740 volatile struct thread *owner;
741 #endif
742 #ifdef LOCK_PROFILING
743 uint64_t waittime = 0;
744 int contested = 0;
745 #endif
746 uintptr_t x;
747 int error = 0;
748 #ifdef KDTRACE_HOOKS
749 uint64_t spin_cnt = 0;
750 uint64_t sleep_cnt = 0;
751 int64_t sleep_time = 0;
752 #endif
753
754 /*
755 * As with rwlocks, we don't make any attempt to try to block
756 * shared locks once there is an exclusive waiter.
757 */
758 for (;;) {
759 #ifdef KDTRACE_HOOKS
760 spin_cnt++;
761 #endif
762 x = sx->sx_lock;
763
764 /*
765 * If no other thread has an exclusive lock then try to bump up
766 * the count of sharers. Since we have to preserve the state
767 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
768 * shared lock loop back and retry.
769 */
770 if (x & SX_LOCK_SHARED) {
771 MPASS(!(x & SX_LOCK_SHARED_WAITERS));
772 if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
773 x + SX_ONE_SHARER)) {
774 if (LOCK_LOG_TEST(&sx->lock_object, 0))
775 CTR4(KTR_LOCK,
776 "%s: %p succeed %p -> %p", __func__,
777 sx, (void *)x,
778 (void *)(x + SX_ONE_SHARER));
779 break;
780 }
781 continue;
782 }
783 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
784 &waittime);
785
786 #ifdef ADAPTIVE_SX
787 /*
788 * If the owner is running on another CPU, spin until
789 * the owner stops running or the state of the lock
790 * changes.
791 */
792 if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
793 x = SX_OWNER(x);
794 owner = (struct thread *)x;
795 if (TD_IS_RUNNING(owner)) {
796 if (LOCK_LOG_TEST(&sx->lock_object, 0))
797 CTR3(KTR_LOCK,
798 "%s: spinning on %p held by %p",
799 __func__, sx, owner);
800 GIANT_SAVE();
801 while (SX_OWNER(sx->sx_lock) == x &&
802 TD_IS_RUNNING(owner)) {
803 #ifdef KDTRACE_HOOKS
804 spin_cnt++;
805 #endif
806 cpu_spinwait();
807 }
808 continue;
809 }
810 }
811 #endif
812
813 /*
814 * Some other thread already has an exclusive lock, so
815 * start the process of blocking.
816 */
817 sleepq_lock(&sx->lock_object);
818 x = sx->sx_lock;
819
820 /*
821 * The lock could have been released while we spun.
822 * In this case loop back and retry.
823 */
824 if (x & SX_LOCK_SHARED) {
825 sleepq_release(&sx->lock_object);
826 continue;
827 }
828
829 #ifdef ADAPTIVE_SX
830 /*
831 * If the owner is running on another CPU, spin until
832 * the owner stops running or the state of the lock
833 * changes.
834 */
835 if (!(x & SX_LOCK_SHARED) &&
836 (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
837 owner = (struct thread *)SX_OWNER(x);
838 if (TD_IS_RUNNING(owner)) {
839 sleepq_release(&sx->lock_object);
840 continue;
841 }
842 }
843 #endif
844
845 /*
846 * Try to set the SX_LOCK_SHARED_WAITERS flag. If we
847 * fail to set it drop the sleep queue lock and loop
848 * back.
849 */
850 if (!(x & SX_LOCK_SHARED_WAITERS)) {
851 if (!atomic_cmpset_ptr(&sx->sx_lock, x,
852 x | SX_LOCK_SHARED_WAITERS)) {
853 sleepq_release(&sx->lock_object);
854 continue;
855 }
856 if (LOCK_LOG_TEST(&sx->lock_object, 0))
857 CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
858 __func__, sx);
859 }
860
861 /*
862 * Since we have been unable to acquire the shared lock,
863 * we have to sleep.
864 */
865 if (LOCK_LOG_TEST(&sx->lock_object, 0))
866 CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
867 __func__, sx);
868
869 #ifdef KDTRACE_HOOKS
870 sleep_time -= lockstat_nsecs();
871 #endif
872 GIANT_SAVE();
873 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
874 SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
875 SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
876 if (!(opts & SX_INTERRUPTIBLE))
877 sleepq_wait(&sx->lock_object, 0);
878 else
879 error = sleepq_wait_sig(&sx->lock_object, 0);
880 #ifdef KDTRACE_HOOKS
881 sleep_time += lockstat_nsecs();
882 sleep_cnt++;
883 #endif
884 if (error) {
885 if (LOCK_LOG_TEST(&sx->lock_object, 0))
886 CTR2(KTR_LOCK,
887 "%s: interruptible sleep by %p suspended by signal",
888 __func__, sx);
889 break;
890 }
891 if (LOCK_LOG_TEST(&sx->lock_object, 0))
892 CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
893 __func__, sx);
894 }
895 if (error == 0)
896 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx,
897 contested, waittime, file, line);
898 #ifdef KDTRACE_HOOKS
899 if (sleep_time)
900 LOCKSTAT_RECORD1(LS_SX_SLOCK_BLOCK, sx, sleep_time);
901 if (spin_cnt > sleep_cnt)
902 LOCKSTAT_RECORD1(LS_SX_SLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
903 #endif
904 GIANT_RESTORE();
905 return (error);
906 }
907
908 /*
909 * This function represents the so-called 'hard case' for sx_sunlock
910 * operation. All 'easy case' failures are redirected to this. Note
911 * that ideally this would be a static function, but it needs to be
912 * accessible from at least sx.h.
913 */
914 void
915 _sx_sunlock_hard(struct sx *sx, const char *file, int line)
916 {
917 uintptr_t x;
918 int wakeup_swapper;
919
920 for (;;) {
921 x = sx->sx_lock;
922
923 /*
924 * We should never have waiting sharers while at least one
925 * thread holds a shared lock.
926 */
927 KASSERT(!(x & SX_LOCK_SHARED_WAITERS),
928 ("%s: waiting sharers", __func__));
929
930 /*
931 * See if there is more than one shared lock held. If
932 * so, just drop one and return.
933 */
934 if (SX_SHARERS(x) > 1) {
935 if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
936 x - SX_ONE_SHARER)) {
937 if (LOCK_LOG_TEST(&sx->lock_object, 0))
938 CTR4(KTR_LOCK,
939 "%s: %p succeeded %p -> %p",
940 __func__, sx, (void *)x,
941 (void *)(x - SX_ONE_SHARER));
942 break;
943 }
944 continue;
945 }
946
947 /*
948 * If there aren't any waiters for an exclusive lock,
949 * then try to drop it quickly.
950 */
951 if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
952 MPASS(x == SX_SHARERS_LOCK(1));
953 if (atomic_cmpset_rel_ptr(&sx->sx_lock,
954 SX_SHARERS_LOCK(1), SX_LOCK_UNLOCKED)) {
955 if (LOCK_LOG_TEST(&sx->lock_object, 0))
956 CTR2(KTR_LOCK, "%s: %p last succeeded",
957 __func__, sx);
958 break;
959 }
960 continue;
961 }
962
963 /*
964 * At this point, there should just be one sharer with
965 * exclusive waiters.
966 */
967 MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));
968
969 sleepq_lock(&sx->lock_object);
970
971 /*
972 * The wakeup semantic here is quite simple: just wake up all
973 * the exclusive waiters.
974 * Note that the state of the lock could have changed, so if
975 * the cmpset fails, loop back and retry.
976 */
977 if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
978 SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
979 SX_LOCK_UNLOCKED)) {
980 sleepq_release(&sx->lock_object);
981 continue;
982 }
983 if (LOCK_LOG_TEST(&sx->lock_object, 0))
984 CTR2(KTR_LOCK, "%s: %p waking up all threads on "
985 "exclusive queue", __func__, sx);
986 wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
987 0, SQ_EXCLUSIVE_QUEUE);
988 sleepq_release(&sx->lock_object);
989 if (wakeup_swapper)
990 kick_proc0();
991 break;
992 }
993 }
994
995 #ifdef INVARIANT_SUPPORT
996 #ifndef INVARIANTS
997 #undef _sx_assert
998 #endif
999
1000 /*
1001 * In the non-WITNESS case, sx_assert() can only detect that at least
1002 * *some* thread owns an slock, but it cannot guarantee that *this*
1003 * thread owns an slock.
1004 */
1005 void
1006 _sx_assert(struct sx *sx, int what, const char *file, int line)
1007 {
1008 #ifndef WITNESS
1009 int slocked = 0;
1010 #endif
1011
1012 if (panicstr != NULL)
1013 return;
1014 switch (what) {
1015 case SA_SLOCKED:
1016 case SA_SLOCKED | SA_NOTRECURSED:
1017 case SA_SLOCKED | SA_RECURSED:
1018 #ifndef WITNESS
1019 slocked = 1;
1020 /* FALLTHROUGH */
1021 #endif
1022 case SA_LOCKED:
1023 case SA_LOCKED | SA_NOTRECURSED:
1024 case SA_LOCKED | SA_RECURSED:
1025 #ifdef WITNESS
1026 witness_assert(&sx->lock_object, what, file, line);
1027 #else
1028 /*
1029 * If some other thread has an exclusive lock or we
1030 * have one and are asserting a shared lock, fail.
1031 * Also, if no one has a lock at all, fail.
1032 */
1033 if (sx->sx_lock == SX_LOCK_UNLOCKED ||
1034 (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
1035 sx_xholder(sx) != curthread)))
1036 panic("Lock %s not %slocked @ %s:%d\n",
1037 sx->lock_object.lo_name, slocked ? "share " : "",
1038 file, line);
1039
1040 if (!(sx->sx_lock & SX_LOCK_SHARED)) {
1041 if (sx_recursed(sx)) {
1042 if (what & SA_NOTRECURSED)
1043 panic("Lock %s recursed @ %s:%d\n",
1044 sx->lock_object.lo_name, file,
1045 line);
1046 } else if (what & SA_RECURSED)
1047 panic("Lock %s not recursed @ %s:%d\n",
1048 sx->lock_object.lo_name, file, line);
1049 }
1050 #endif
1051 break;
1052 case SA_XLOCKED:
1053 case SA_XLOCKED | SA_NOTRECURSED:
1054 case SA_XLOCKED | SA_RECURSED:
1055 if (sx_xholder(sx) != curthread)
1056 panic("Lock %s not exclusively locked @ %s:%d\n",
1057 sx->lock_object.lo_name, file, line);
1058 if (sx_recursed(sx)) {
1059 if (what & SA_NOTRECURSED)
1060 panic("Lock %s recursed @ %s:%d\n",
1061 sx->lock_object.lo_name, file, line);
1062 } else if (what & SA_RECURSED)
1063 panic("Lock %s not recursed @ %s:%d\n",
1064 sx->lock_object.lo_name, file, line);
1065 break;
1066 case SA_UNLOCKED:
1067 #ifdef WITNESS
1068 witness_assert(&sx->lock_object, what, file, line);
1069 #else
1070 /*
1071 * If we hold an exclusive lock, fail. We can't
1072 * reliably check to see if we hold a shared lock or
1073 * not.
1074 */
1075 if (sx_xholder(sx) == curthread)
1076 panic("Lock %s exclusively locked @ %s:%d\n",
1077 sx->lock_object.lo_name, file, line);
1078 #endif
1079 break;
1080 default:
1081 panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
1082 line);
1083 }
1084 }
1085 #endif /* INVARIANT_SUPPORT */
1086
1087 #ifdef DDB
1088 static void
1089 db_show_sx(struct lock_object *lock)
1090 {
1091 struct thread *td;
1092 struct sx *sx;
1093
1094 sx = (struct sx *)lock;
1095
1096 db_printf(" state: ");
1097 if (sx->sx_lock == SX_LOCK_UNLOCKED)
1098 db_printf("UNLOCKED\n");
1099 else if (sx->sx_lock == SX_LOCK_DESTROYED) {
1100 db_printf("DESTROYED\n");
1101 return;
1102 } else if (sx->sx_lock & SX_LOCK_SHARED)
1103 db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
1104 else {
1105 td = sx_xholder(sx);
1106 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1107 td->td_tid, td->td_proc->p_pid, td->td_name);
1108 if (sx_recursed(sx))
1109 db_printf(" recursed: %d\n", sx->sx_recurse);
1110 }
1111
1112 db_printf(" waiters: ");
1113 switch(sx->sx_lock &
1114 (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
1115 case SX_LOCK_SHARED_WAITERS:
1116 db_printf("shared\n");
1117 break;
1118 case SX_LOCK_EXCLUSIVE_WAITERS:
1119 db_printf("exclusive\n");
1120 break;
1121 case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
1122 db_printf("exclusive and shared\n");
1123 break;
1124 default:
1125 db_printf("none\n");
1126 }
1127 }
1128
1129 /*
1130 * Check to see if a thread that is blocked on a sleep queue is actually
1131 * blocked on an sx lock. If so, output some details and return true.
1132 * If the lock has an exclusive owner, return that in *ownerp.
1133 */
1134 int
1135 sx_chain(struct thread *td, struct thread **ownerp)
1136 {
1137 struct sx *sx;
1138
1139 /*
1140 * Check to see if this thread is blocked on an sx lock.
1141 * First, we check the lock class. If that is ok, then we
1142 * compare the lock name against the wait message.
1143 */
1144 sx = td->td_wchan;
1145 if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
1146 sx->lock_object.lo_name != td->td_wmesg)
1147 return (0);
1148
1149 /* We think we have an sx lock, so output some details. */
1150 db_printf("blocked on sx \"%s\" ", td->td_wmesg);
1151 *ownerp = sx_xholder(sx);
1152 if (sx->sx_lock & SX_LOCK_SHARED)
1153 db_printf("SLOCK (count %ju)\n",
1154 (uintmax_t)SX_SHARERS(sx->sx_lock));
1155 else
1156 db_printf("XLOCK\n");
1157 return (1);
1158 }
1159 #endif