FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sx.c
1 /*-
2 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
3 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice(s), this list of conditions and the following disclaimer as
11 * the first lines of this file unmodified other than the possible
12 * addition of one or more copyright notices.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice(s), this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
18 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
21 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
24 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
27 * DAMAGE.
28 */
29
30 /*
31 * Shared/exclusive locks. This implementation attempts to ensure
32 * deterministic lock granting behavior, so that slocks and xlocks are
33 * interleaved.
34 *
35 * Priority propagation will not generally raise the priority of lock holders,
36 * so should not be relied upon in combination with sx locks.
37 */
38
39 #include "opt_adaptive_sx.h"
40 #include "opt_ddb.h"
41
42 #include <sys/cdefs.h>
43 __FBSDID("$FreeBSD: releng/6.3/sys/kern/kern_sx.c 173886 2007-11-24 19:45:58Z cvs2svn $");
44
45 #include <sys/param.h>
46 #include <sys/ktr.h>
47 #include <sys/lock.h>
48 #include <sys/mutex.h>
49 #include <sys/proc.h>
50 #include <sys/sleepqueue.h>
51 #include <sys/sx.h>
52 #include <sys/systm.h>
53
54 #ifdef ADAPTIVE_SX
55 #include <machine/cpu.h>
56 #endif
57
58 #ifdef DDB
59 #include <ddb/ddb.h>
60 #endif
61
62 #if !defined(SMP) && defined(ADAPTIVE_SX)
63 #error "You must have SMP to enable the ADAPTIVE_SX option"
64 #endif
65
66 CTASSERT(((SX_ADAPTIVESPIN | SX_RECURSE) & LO_CLASSFLAGS) ==
67 (SX_ADAPTIVESPIN | SX_RECURSE));
68
69 /* Handy macros for sleep queues. */
70 #define SQ_EXCLUSIVE_QUEUE 0
71 #define SQ_SHARED_QUEUE 1
72
73 /*
74 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file. We
75 * drop Giant anytime we have to sleep or if we adaptively spin.
76 */
77 #define GIANT_DECLARE \
78 int _giantcnt = 0; \
79 WITNESS_SAVE_DECL(Giant) \
80
81 #define GIANT_SAVE() do { \
82 if (mtx_owned(&Giant)) { \
83 WITNESS_SAVE(&Giant.mtx_object, Giant); \
84 while (mtx_owned(&Giant)) { \
85 _giantcnt++; \
86 mtx_unlock(&Giant); \
87 } \
88 } \
89 } while (0)
90
91 #define GIANT_RESTORE() do { \
92 if (_giantcnt > 0) { \
93 mtx_assert(&Giant, MA_NOTOWNED); \
94 while (_giantcnt--) \
95 mtx_lock(&Giant); \
96 WITNESS_RESTORE(&Giant.mtx_object, Giant); \
97 } \
98 } while (0)
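
/*
 * Illustrative sketch of how the macros above are intended to be used:
 * bracket any path that may sleep, so that Giant is fully released before
 * blocking and later reacquired to the same recursion depth.  The function
 * below is a made-up example; _sx_xlock_hard() and _sx_slock_hard() further
 * down follow the same shape.
 */
#if 0
static void
example_block_on_sx(struct sx *sx)
{
	GIANT_DECLARE;

	sleepq_lock(&sx->lock_object);
	GIANT_SAVE();			/* drop Giant before we can sleep */
	sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name, SLEEPQ_SX,
	    SQ_EXCLUSIVE_QUEUE);
	sleepq_wait(&sx->lock_object);
	GIANT_RESTORE();		/* restore the saved recursion count */
}
#endif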
99
100 /*
101 * Returns true if an exclusive lock is recursed. It assumes
102 * curthread currently has an exclusive lock.
103 */
104 #define sx_recursed(sx) ((sx)->sx_recurse != 0)
105
106 #ifdef DDB
107 static void db_show_sx(struct lock_object *lock);
108 #endif
109
110 struct lock_class lock_class_sx = {
111 .lc_name = "sx",
112 .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
113 #ifdef DDB
114 .lc_ddb_show = db_show_sx,
115 #endif
116 };
117
118 #ifndef INVARIANTS
119 #define _sx_assert(sx, what, file, line)
120 #endif
121
122 void
123 sx_sysinit(void *arg)
124 {
125 struct sx_args *sargs = arg;
126
127 sx_init(sargs->sa_sx, sargs->sa_desc);
128 }
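
/*
 * Illustrative sketch: sx_sysinit() is normally reached through the
 * SX_SYSINIT() macro in sys/sx.h, which arranges for the named lock to be
 * initialized automatically at boot.  The lock and names below are made up
 * for the example.
 */
#if 0
static struct sx example_global_lock;
SX_SYSINIT(example_global, &example_global_lock, "example global lock");
#endif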
129
130 void
131 sx_init(struct sx *sx, const char *description)
132 {
133
134 sx_init_flags(sx, description, 0);
135 }
136
137 void
138 sx_init_flags(struct sx *sx, const char *description, int opts)
139 {
140 int flags;
141
142 MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
143 SX_NOPROFILE | SX_ADAPTIVESPIN)) == 0);
144
145 flags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
146 if (opts & SX_DUPOK)
147 flags |= LO_DUPOK;
148 if (!(opts & SX_NOWITNESS))
149 flags |= LO_WITNESS;
150 if (opts & SX_QUIET)
151 flags |= LO_QUIET;
152
153 flags |= opts & (SX_ADAPTIVESPIN | SX_RECURSE);
154 sx->sx_lock = SX_LOCK_UNLOCKED;
155 sx->sx_recurse = 0;
156 lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
157 }
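
/*
 * Illustrative sketch of the basic life cycle: initialize once, take the
 * lock shared for readers and exclusive for writers, destroy on teardown.
 * The lock, data, and function names are made up for the example.
 */
#if 0
static struct sx example_lock;
static int example_value;

static void
example_init(void)
{

	sx_init_flags(&example_lock, "example", SX_RECURSE);
}

static int
example_get(void)
{
	int v;

	sx_slock(&example_lock);
	v = example_value;
	sx_sunlock(&example_lock);
	return (v);
}

static void
example_set(int v)
{

	sx_xlock(&example_lock);
	example_value = v;
	sx_xunlock(&example_lock);
}

static void
example_fini(void)
{

	sx_destroy(&example_lock);
}
#endif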
158
159 void
160 sx_destroy(struct sx *sx)
161 {
162
163 KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
164 KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
165 sx->sx_lock = SX_LOCK_DESTROYED;
166 lock_destroy(&sx->lock_object);
167 }
168
169 int
170 _sx_slock(struct sx *sx, int opts, const char *file, int line)
171 {
172 int error = 0;
173
174 MPASS(curthread != NULL);
175 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
176 ("sx_slock() of destroyed sx @ %s:%d", file, line));
177 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line);
178 error = __sx_slock(sx, opts, file, line);
179 if (!error) {
180 LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
181 WITNESS_LOCK(&sx->lock_object, 0, file, line);
182 curthread->td_locks++;
183 }
184
185 return (error);
186 }
187
188 int
189 _sx_try_slock(struct sx *sx, const char *file, int line)
190 {
191 uintptr_t x;
192
193 x = sx->sx_lock;
194 KASSERT(x != SX_LOCK_DESTROYED,
195 ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
196 if ((x & SX_LOCK_SHARED) && atomic_cmpset_acq_ptr(&sx->sx_lock, x,
197 x + SX_ONE_SHARER)) {
198 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
199 WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
200 curthread->td_locks++;
201 return (1);
202 }
203
204 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
205 return (0);
206 }
207
208 int
209 _sx_xlock(struct sx *sx, int opts, const char *file, int line)
210 {
211 int error = 0;
212
213 MPASS(curthread != NULL);
214 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
215 ("sx_xlock() of destroyed sx @ %s:%d", file, line));
216 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
217 line);
218 error = __sx_xlock(sx, curthread, opts, file, line);
219 if (!error) {
220 LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
221 file, line);
222 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
223 curthread->td_locks++;
224 }
225
226 return (error);
227 }
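
/*
 * Illustrative sketch: passing SX_INTERRUPTIBLE (normally via the
 * sx_xlock_sig()/sx_slock_sig() convenience macros that sys/sx.h is assumed
 * to provide) makes the sleep interruptible, so the acquisition can fail
 * with an error instead of blocking forever.  Names are made up.
 */
#if 0
static int
example_set_interruptible(struct sx *sx, int *datum, int v)
{
	int error;

	error = sx_xlock_sig(sx);	/* 0 on success, e.g. EINTR on signal */
	if (error != 0)
		return (error);
	*datum = v;
	sx_xunlock(sx);
	return (0);
}
#endif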
228
229 int
230 _sx_try_xlock(struct sx *sx, const char *file, int line)
231 {
232 int rval;
233
234 MPASS(curthread != NULL);
235 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
236 ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
237
238 if (sx_xlocked(sx) && (sx->lock_object.lo_flags & SX_RECURSE) != 0) {
239 sx->sx_recurse++;
240 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
241 rval = 1;
242 } else
243 rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
244 (uintptr_t)curthread);
245 LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
246 if (rval) {
247 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
248 file, line);
249 curthread->td_locks++;
250 }
251
252 return (rval);
253 }
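
/*
 * Illustrative sketch: sx_try_xlock() never sleeps, so it is the right tool
 * when blocking is not acceptable; the caller must handle failure.  Names
 * are made up for the example.
 */
#if 0
static int
example_try_set(struct sx *sx, int *datum, int v)
{

	if (!sx_try_xlock(sx))
		return (EBUSY);		/* contended, try again later */
	*datum = v;
	sx_xunlock(sx);
	return (0);
}
#endif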
254
255 void
256 _sx_sunlock(struct sx *sx, const char *file, int line)
257 {
258
259 MPASS(curthread != NULL);
260 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
261 ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
262 _sx_assert(sx, SA_SLOCKED, file, line);
263 curthread->td_locks--;
264 WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
265 LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
266 #ifdef LOCK_PROFILING_SHARED
267 if (SX_SHARERS(sx->sx_lock) == 1)
268 lock_profile_release_lock(&sx->lock_object);
269 #endif
270 __sx_sunlock(sx, file, line);
271 }
272
273 void
274 _sx_xunlock(struct sx *sx, const char *file, int line)
275 {
276
277 MPASS(curthread != NULL);
278 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
279 ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
280 _sx_assert(sx, SA_XLOCKED, file, line);
281 curthread->td_locks--;
282 WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
283 LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
284 line);
285 if (!sx_recursed(sx))
286 lock_profile_release_lock(&sx->lock_object);
287 __sx_xunlock(sx, curthread, file, line);
288 }
289
290 /*
291 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
292 * This will only succeed if this thread holds a single shared lock.
293 * Return 1 if the upgrade succeeds, 0 otherwise.
294 */
295 int
296 _sx_try_upgrade(struct sx *sx, const char *file, int line)
297 {
298 uintptr_t x;
299 int success;
300
301 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
302 ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
303 _sx_assert(sx, SA_SLOCKED, file, line);
304
305 /*
306 * Try to switch from one shared lock to an exclusive lock. We need
307 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
308 * we will wake up the exclusive waiters when we drop the lock.
309 */
310 x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
311 success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
312 (uintptr_t)curthread | x);
313 LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
314 if (success)
315 WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
316 file, line);
317 return (success);
318 }
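
/*
 * Illustrative sketch of the usual upgrade pattern: examine the data under
 * a shared lock and escalate to exclusive only when a change is needed.  If
 * the non-blocking upgrade fails, the shared lock is dropped, an exclusive
 * lock is taken, and the check is repeated, since the data may have changed
 * in the window where no lock was held.  Names are made up for the example.
 */
#if 0
static void
example_update(struct sx *sx, int *datum, int v)
{

	sx_slock(sx);
	if (*datum == v) {
		sx_sunlock(sx);		/* nothing to do */
		return;
	}
	if (!sx_try_upgrade(sx)) {
		sx_sunlock(sx);
		sx_xlock(sx);
		if (*datum == v) {	/* recheck after reacquiring */
			sx_xunlock(sx);
			return;
		}
	}
	*datum = v;
	sx_xunlock(sx);
}
#endif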
319
320 /*
321 * Downgrade an unrecursed exclusive lock into a single shared lock.
322 */
323 void
324 _sx_downgrade(struct sx *sx, const char *file, int line)
325 {
326 uintptr_t x;
327
328 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
329 ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
330 _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
331 #ifndef INVARIANTS
332 if (sx_recursed(sx))
333 panic("downgrade of a recursed lock");
334 #endif
335
336 WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);
337
338 /*
339 * Try to switch from an exclusive lock with no shared waiters
340 * to one sharer with no shared waiters. If there are
341 * exclusive waiters, we don't need to lock the sleep queue so
342 * long as we preserve the flag. We do one quick try and if
343 * that fails we grab the sleepq lock to keep the flags from
344 * changing and do it the slow way.
345 *
346 * We have to lock the sleep queue if there are shared waiters
347 * so we can wake them up.
348 */
349 x = sx->sx_lock;
350 if (!(x & SX_LOCK_SHARED_WAITERS) &&
351 atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
352 (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
353 LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
354 return;
355 }
356
357 /*
358 * Lock the sleep queue so we can read the waiters bits
359 * without any races and wakeup any shared waiters.
360 */
361 sleepq_lock(&sx->lock_object);
362
363 /*
364 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
365 * shared lock. If there are any shared waiters, wake them up.
366 */
367 x = sx->sx_lock;
368 atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
369 (x & SX_LOCK_EXCLUSIVE_WAITERS));
370 if (x & SX_LOCK_SHARED_WAITERS)
371 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1,
372 SQ_SHARED_QUEUE);
373 else
374 sleepq_release(&sx->lock_object);
375
376 LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
377 }
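
/*
 * Illustrative sketch: downgrade lets a writer finish its modification and
 * then keep a consistent read-only view while allowing new readers in.
 * Names are made up for the example.
 */
#if 0
static void
example_publish(struct sx *sx, int *datum, int v)
{

	sx_xlock(sx);
	*datum = v;			/* exclusive phase: modify */
	sx_downgrade(sx);		/* now one sharer among possibly many */
	/* read-only work may proceed concurrently with other readers */
	sx_sunlock(sx);
}
#endif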
378
379 /*
380 * This function represents the so-called 'hard case' for sx_xlock
381 * operation. All 'easy case' failures are redirected to this. Note
382 * that ideally this would be a static function, but it needs to be
383 * accessible from at least sx.h.
384 */
385 int
386 _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
387 int line)
388 {
389 GIANT_DECLARE;
390 #ifdef ADAPTIVE_SX
391 volatile struct thread *owner;
392 #endif
393 /* uint64_t waittime = 0; */
394 uintptr_t x;
395 int /* contested = 0, */ error = 0;
396
397 /* If we already hold an exclusive lock, then recurse. */
398 if (sx_xlocked(sx)) {
399 KASSERT((sx->lock_object.lo_flags & SX_RECURSE) != 0,
400 ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
401 sx->lock_object.lo_name, file, line));
402 sx->sx_recurse++;
403 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
404 if (LOCK_LOG_TEST(&sx->lock_object, 0))
405 CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
406 return (0);
407 }
408
409 if (LOCK_LOG_TEST(&sx->lock_object, 0))
410 CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
411 sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
412
413 while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
414 #ifdef ADAPTIVE_SX
415 /*
416 * If the lock is write locked and the owner is
417 * running on another CPU, spin until the owner stops
418 * running or the state of the lock changes.
419 */
420 x = sx->sx_lock;
421 if (!(x & SX_LOCK_SHARED) &&
422 (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
423 x = SX_OWNER(x);
424 owner = (struct thread *)x;
425 if (TD_IS_RUNNING(owner)) {
426 if (LOCK_LOG_TEST(&sx->lock_object, 0))
427 CTR3(KTR_LOCK,
428 "%s: spinning on %p held by %p",
429 __func__, sx, owner);
430 GIANT_SAVE();
431 lock_profile_obtain_lock_failed(
432 &sx->lock_object, &contested, &waittime);
433 while (SX_OWNER(sx->sx_lock) == x &&
434 TD_IS_RUNNING(owner))
435 cpu_spinwait();
436 continue;
437 }
438 }
439 #endif
440
441 sleepq_lock(&sx->lock_object);
442 x = sx->sx_lock;
443
444 /*
445 * If the lock was released while spinning on the
446 * sleep queue chain lock, try again.
447 */
448 if (x == SX_LOCK_UNLOCKED) {
449 sleepq_release(&sx->lock_object);
450 continue;
451 }
452
453 #ifdef ADAPTIVE_SX
454 /*
455 * The current lock owner might have started executing
456 * on another CPU (or the lock could have changed
457 * owners) while we were waiting on the sleep queue
458 * chain lock. If so, drop the sleep queue lock and try
459 * again.
460 */
461 if (!(x & SX_LOCK_SHARED) &&
462 (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
463 owner = (struct thread *)SX_OWNER(x);
464 if (TD_IS_RUNNING(owner)) {
465 sleepq_release(&sx->lock_object);
466 continue;
467 }
468 }
469 #endif
470
471 /*
472 * If an exclusive lock was released with both shared
473 * and exclusive waiters and a shared waiter hasn't
474 * woken up and acquired the lock yet, sx_lock will be
475 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
476 * If we see that value, try to acquire it once. Note
477 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
478 * as there are other exclusive waiters still. If we
479 * fail, restart the loop.
480 */
481 if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
482 if (atomic_cmpset_acq_ptr(&sx->sx_lock,
483 SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
484 tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
485 sleepq_release(&sx->lock_object);
486 CTR2(KTR_LOCK, "%s: %p claimed by new writer",
487 __func__, sx);
488 break;
489 }
490 sleepq_release(&sx->lock_object);
491 continue;
492 }
493
494 /*
495 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS. If we fail,
496 * than loop back and retry.
497 */
498 if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
499 if (!atomic_cmpset_ptr(&sx->sx_lock, x,
500 x | SX_LOCK_EXCLUSIVE_WAITERS)) {
501 sleepq_release(&sx->lock_object);
502 continue;
503 }
504 if (LOCK_LOG_TEST(&sx->lock_object, 0))
505 CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
506 __func__, sx);
507 }
508
509 /*
510 * Since we have been unable to acquire the exclusive
511 * lock and the exclusive waiters flag is set, we have
512 * to sleep.
513 */
514 if (LOCK_LOG_TEST(&sx->lock_object, 0))
515 CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
516 __func__, sx);
517
518 GIANT_SAVE();
519 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
520 &waittime);
521 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
522 SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
523 SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
524 if (!(opts & SX_INTERRUPTIBLE))
525 sleepq_wait(&sx->lock_object);
526 else
527 error = sleepq_wait_sig(&sx->lock_object);
528
529 if (error) {
530 if (LOCK_LOG_TEST(&sx->lock_object, 0))
531 CTR2(KTR_LOCK,
532 "%s: interruptible sleep by %p suspended by signal",
533 __func__, sx);
534 break;
535 }
536 if (LOCK_LOG_TEST(&sx->lock_object, 0))
537 CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
538 __func__, sx);
539 }
540
541 GIANT_RESTORE();
542 if (!error)
543 lock_profile_obtain_lock_success(&sx->lock_object, contested,
544 waittime, file, line);
545 return (error);
546 }
547
548 /*
549 * This function represents the so-called 'hard case' for sx_xunlock
550 * operation. All 'easy case' failures are redirected to this. Note
551 * that ideally this would be a static function, but it needs to be
552 * accessible from at least sx.h.
553 */
554 void
555 _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
556 {
557 uintptr_t x;
558 int queue;
559
560 MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
561
562 /* If the lock is recursed, then unrecurse one level. */
563 if (sx_xlocked(sx) && sx_recursed(sx)) {
564 if ((--sx->sx_recurse) == 0)
565 atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
566 if (LOCK_LOG_TEST(&sx->lock_object, 0))
567 CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
568 return;
569 }
570 MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
571 SX_LOCK_EXCLUSIVE_WAITERS));
572 if (LOCK_LOG_TEST(&sx->lock_object, 0))
573 CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);
574
575 sleepq_lock(&sx->lock_object);
576 x = SX_LOCK_UNLOCKED;
577
578 /*
579 * The wake up algorithm here is quite simple and probably not
580 * ideal. It gives precedence to shared waiters if they are
581 * present. In that case, we have to preserve the
582 * state of the exclusive waiters flag.
583 */
584 if (sx->sx_lock & SX_LOCK_SHARED_WAITERS) {
585 queue = SQ_SHARED_QUEUE;
586 x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
587 } else
588 queue = SQ_EXCLUSIVE_QUEUE;
589
590 /* Wake up all the waiters for the specific queue. */
591 if (LOCK_LOG_TEST(&sx->lock_object, 0))
592 CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
593 __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
594 "exclusive");
595 atomic_store_rel_ptr(&sx->sx_lock, x);
596 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1, queue);
597 }
598
599 /*
600 * This function represents the so-called 'hard case' for sx_slock
601 * operation. All 'easy case' failures are redirected to this. Note
602 * that ideally this would be a static function, but it needs to be
603 * accessible from at least sx.h.
604 */
605 int
606 _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
607 {
608 GIANT_DECLARE;
609 #ifdef ADAPTIVE_SX
610 volatile struct thread *owner;
611 #endif
612 #ifdef LOCK_PROFILING_SHARED
613 uint64_t waittime = 0;
614 int contested = 0;
615 #endif
616 uintptr_t x;
617 int error = 0;
618
619 /*
620 * As with rwlocks, we make no attempt to block new shared
621 * locks once there is an exclusive waiter.
622 */
623 for (;;) {
624 x = sx->sx_lock;
625
626 /*
627 * If no other thread has an exclusive lock then try to bump up
628 * the count of sharers. Since we have to preserve the state
629 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
630 * shared lock loop back and retry.
631 */
632 if (x & SX_LOCK_SHARED) {
633 MPASS(!(x & SX_LOCK_SHARED_WAITERS));
634 if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
635 x + SX_ONE_SHARER)) {
636 #ifdef LOCK_PROFILING_SHARED
637 if (SX_SHARERS(x) == 0)
638 lock_profile_obtain_lock_success(
639 &sx->lock_object, contested,
640 waittime, file, line);
641 #endif
642 if (LOCK_LOG_TEST(&sx->lock_object, 0))
643 CTR4(KTR_LOCK,
644 "%s: %p succeed %p -> %p", __func__,
645 sx, (void *)x,
646 (void *)(x + SX_ONE_SHARER));
647 break;
648 }
649 continue;
650 }
651
652 #ifdef ADAPTIVE_SX
653 /*
654 * If the owner is running on another CPU, spin until
655 * the owner stops running or the state of the lock
656 * changes.
657 */
658 else if (sx->lock_object.lo_flags & SX_ADAPTIVESPIN) {
659 x = SX_OWNER(x);
660 owner = (struct thread *)x;
661 if (TD_IS_RUNNING(owner)) {
662 if (LOCK_LOG_TEST(&sx->lock_object, 0))
663 CTR3(KTR_LOCK,
664 "%s: spinning on %p held by %p",
665 __func__, sx, owner);
666 GIANT_SAVE();
667 #ifdef LOCK_PROFILING_SHARED
668 lock_profile_obtain_lock_failed(
669 &sx->lock_object, &contested, &waittime);
670 #endif
671 while (SX_OWNER(sx->sx_lock) == x &&
672 TD_IS_RUNNING(owner))
673 cpu_spinwait();
674 continue;
675 }
676 }
677 #endif
678
679 /*
680 * Some other thread already has an exclusive lock, so
681 * start the process of blocking.
682 */
683 sleepq_lock(&sx->lock_object);
684 x = sx->sx_lock;
685
686 /*
687 * The lock could have been released while we spun.
688 * In this case loop back and retry.
689 */
690 if (x & SX_LOCK_SHARED) {
691 sleepq_release(&sx->lock_object);
692 continue;
693 }
694
695 #ifdef ADAPTIVE_SX
696 /*
697 * If the owner is running on another CPU, spin until
698 * the owner stops running or the state of the lock
699 * changes.
700 */
701 if (!(x & SX_LOCK_SHARED) &&
702 (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
703 owner = (struct thread *)SX_OWNER(x);
704 if (TD_IS_RUNNING(owner)) {
705 sleepq_release(&sx->lock_object);
706 continue;
707 }
708 }
709 #endif
710
711 /*
712 * Try to set the SX_LOCK_SHARED_WAITERS flag. If we
713 * fail to set it drop the sleep queue lock and loop
714 * back.
715 */
716 if (!(x & SX_LOCK_SHARED_WAITERS)) {
717 if (!atomic_cmpset_ptr(&sx->sx_lock, x,
718 x | SX_LOCK_SHARED_WAITERS)) {
719 sleepq_release(&sx->lock_object);
720 continue;
721 }
722 if (LOCK_LOG_TEST(&sx->lock_object, 0))
723 CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
724 __func__, sx);
725 }
726
727 /*
728 * Since we have been unable to acquire the shared lock,
729 * we have to sleep.
730 */
731 if (LOCK_LOG_TEST(&sx->lock_object, 0))
732 CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
733 __func__, sx);
734
735 GIANT_SAVE();
736 #ifdef LOCK_PROFILING_SHARED
737 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
738 &waittime);
739 #endif
740 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
741 SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
742 SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
743 if (!(opts & SX_INTERRUPTIBLE))
744 sleepq_wait(&sx->lock_object);
745 else
746 error = sleepq_wait_sig(&sx->lock_object);
747
748 if (error) {
749 if (LOCK_LOG_TEST(&sx->lock_object, 0))
750 CTR2(KTR_LOCK,
751 "%s: interruptible sleep by %p suspended by signal",
752 __func__, sx);
753 break;
754 }
755 if (LOCK_LOG_TEST(&sx->lock_object, 0))
756 CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
757 __func__, sx);
758 }
759
760 GIANT_RESTORE();
761 return (error);
762 }
763
764 /*
765 * This function represents the so-called 'hard case' for sx_sunlock
766 * operation. All 'easy case' failures are redirected to this. Note
767 * that ideally this would be a static function, but it needs to be
768 * accessible from at least sx.h.
769 */
770 void
771 _sx_sunlock_hard(struct sx *sx, const char *file, int line)
772 {
773 uintptr_t x;
774
775 for (;;) {
776 x = sx->sx_lock;
777
778 /*
779 * We should never have threads waiting for a shared lock
780 * while at least one thread holds a shared lock.
781 */
782 KASSERT(!(x & SX_LOCK_SHARED_WAITERS),
783 ("%s: waiting sharers", __func__));
784
785 /*
786 * See if there is more than one shared lock held. If
787 * so, just drop one and return.
788 */
789 if (SX_SHARERS(x) > 1) {
790 if (atomic_cmpset_ptr(&sx->sx_lock, x,
791 x - SX_ONE_SHARER)) {
792 if (LOCK_LOG_TEST(&sx->lock_object, 0))
793 CTR4(KTR_LOCK,
794 "%s: %p succeeded %p -> %p",
795 __func__, sx, (void *)x,
796 (void *)(x - SX_ONE_SHARER));
797 break;
798 }
799 continue;
800 }
801
802 /*
803 * If there aren't any waiters for an exclusive lock,
804 * then try to drop it quickly.
805 */
806 if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
807 MPASS(x == SX_SHARERS_LOCK(1));
808 if (atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1),
809 SX_LOCK_UNLOCKED)) {
810 if (LOCK_LOG_TEST(&sx->lock_object, 0))
811 CTR2(KTR_LOCK, "%s: %p last succeeded",
812 __func__, sx);
813 break;
814 }
815 continue;
816 }
817
818 /*
819 * At this point, there should just be one sharer with
820 * exclusive waiters.
821 */
822 MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));
823
824 sleepq_lock(&sx->lock_object);
825
826 /*
827 * The wakeup semantics here are quite simple: just wake up
828 * all of the exclusive waiters. Note that the state of the
829 * lock could have changed, so if the cmpset fails, loop
830 * back and retry.
831 */
832 if (!atomic_cmpset_ptr(&sx->sx_lock,
833 SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
834 SX_LOCK_UNLOCKED)) {
835 sleepq_release(&sx->lock_object);
836 continue;
837 }
838 if (LOCK_LOG_TEST(&sx->lock_object, 0))
839 CTR2(KTR_LOCK, "%s: %p waking up all threads on"
840 " exclusive queue", __func__, sx);
841 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1,
842 SQ_EXCLUSIVE_QUEUE);
843 break;
844 }
845 }
846
847 #ifdef INVARIANT_SUPPORT
848 #ifndef INVARIANTS
849 #undef _sx_assert
850 #endif
851
852 /*
853 * In the non-WITNESS case, sx_assert() can only detect that at least
854 * *some* thread owns an slock, but it cannot guarantee that *this*
855 * thread owns an slock.
856 */
857 void
858 _sx_assert(struct sx *sx, int what, const char *file, int line)
859 {
860 #ifndef WITNESS
861 int slocked = 0;
862 #endif
863
864 if (panicstr != NULL)
865 return;
866 switch (what) {
867 case SA_SLOCKED:
868 case SA_SLOCKED | SA_NOTRECURSED:
869 case SA_SLOCKED | SA_RECURSED:
870 #ifndef WITNESS
871 slocked = 1;
872 /* FALLTHROUGH */
873 #endif
874 case SA_LOCKED:
875 case SA_LOCKED | SA_NOTRECURSED:
876 case SA_LOCKED | SA_RECURSED:
877 #ifdef WITNESS
878 witness_assert(&sx->lock_object, what, file, line);
879 #else
880 /*
881 * If some other thread has an exclusive lock or we
882 * have one and are asserting a shared lock, fail.
883 * Also, if no one has a lock at all, fail.
884 */
885 if (sx->sx_lock == SX_LOCK_UNLOCKED ||
886 (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
887 sx_xholder(sx) != curthread)))
888 panic("Lock %s not %slocked @ %s:%d\n",
889 sx->lock_object.lo_name, slocked ? "share " : "",
890 file, line);
891
892 if (!(sx->sx_lock & SX_LOCK_SHARED)) {
893 if (sx_recursed(sx)) {
894 if (what & SA_NOTRECURSED)
895 panic("Lock %s recursed @ %s:%d\n",
896 sx->lock_object.lo_name, file,
897 line);
898 } else if (what & SA_RECURSED)
899 panic("Lock %s not recursed @ %s:%d\n",
900 sx->lock_object.lo_name, file, line);
901 }
902 #endif
903 break;
904 case SA_XLOCKED:
905 case SA_XLOCKED | SA_NOTRECURSED:
906 case SA_XLOCKED | SA_RECURSED:
907 if (sx_xholder(sx) != curthread)
908 panic("Lock %s not exclusively locked @ %s:%d\n",
909 sx->lock_object.lo_name, file, line);
910 if (sx_recursed(sx)) {
911 if (what & SA_NOTRECURSED)
912 panic("Lock %s recursed @ %s:%d\n",
913 sx->lock_object.lo_name, file, line);
914 } else if (what & SA_RECURSED)
915 panic("Lock %s not recursed @ %s:%d\n",
916 sx->lock_object.lo_name, file, line);
917 break;
918 case SA_UNLOCKED:
919 #ifdef WITNESS
920 witness_assert(&sx->lock_object, what, file, line);
921 #else
922 /*
923 * If we hold an exclusive lock, fail. We can't
924 * reliably check to see if we hold a shared lock or
925 * not.
926 */
927 if (sx_xholder(sx) == curthread)
928 panic("Lock %s exclusively locked @ %s:%d\n",
929 sx->lock_object.lo_name, file, line);
930 #endif
931 break;
932 default:
933 panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
934 line);
935 }
936 }
937 #endif /* INVARIANT_SUPPORT */
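
/*
 * Illustrative sketch: internal helpers commonly use sx_assert() to document
 * and enforce the locking precondition they expect from their callers.
 * Names are made up for the example.
 */
#if 0
static int
example_value_locked(struct sx *sx, int *datum)
{

	sx_assert(sx, SA_LOCKED);	/* caller holds sx shared or exclusive */
	return (*datum);
}
#endif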
938
939 #ifdef DDB
940 static void
941 db_show_sx(struct lock_object *lock)
942 {
943 struct thread *td;
944 struct sx *sx;
945
946 sx = (struct sx *)lock;
947
948 db_printf(" state: ");
949 if (sx->sx_lock == SX_LOCK_UNLOCKED)
950 db_printf("UNLOCKED\n");
951 else if (sx->sx_lock == SX_LOCK_DESTROYED) {
952 db_printf("DESTROYED\n");
953 return;
954 } else if (sx->sx_lock & SX_LOCK_SHARED)
955 db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
956 else {
957 td = sx_xholder(sx);
958 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
959 td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
960 if (sx_recursed(sx))
961 db_printf(" recursed: %d\n", sx->sx_recurse);
962 }
963
964 db_printf(" waiters: ");
965 switch (sx->sx_lock &
966 (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
967 case SX_LOCK_SHARED_WAITERS:
968 db_printf("shared\n");
969 break;
970 case SX_LOCK_EXCLUSIVE_WAITERS:
971 db_printf("exclusive\n");
972 break;
973 case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
974 db_printf("exclusive and shared\n");
975 break;
976 default:
977 db_printf("none\n");
978 }
979 }
980
981 /*
982 * Check to see if a thread that is blocked on a sleep queue is actually
983 * blocked on an sx lock. If so, output some details and return true.
984 * If the lock has an exclusive owner, return that in *ownerp.
985 */
986 int
987 sx_chain(struct thread *td, struct thread **ownerp)
988 {
989 struct sx *sx;
990
991 /*
992 * Check to see if this thread is blocked on an sx lock.
993 * First, we check the lock class. If that is ok, then we
994 * compare the lock name against the wait message.
995 */
996 sx = td->td_wchan;
997 if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
998 sx->lock_object.lo_name != td->td_wmesg)
999 return (0);
1000
1001 /* We think we have an sx lock, so output some details. */
1002 db_printf("blocked on sx \"%s\" ", td->td_wmesg);
1003 *ownerp = sx_xholder(sx);
1004 if (sx->sx_lock & SX_LOCK_SHARED)
1005 db_printf("SLOCK (count %ju)\n",
1006 (uintmax_t)SX_SHARERS(sx->sx_lock));
1007 else
1008 db_printf("XLOCK\n");
1009 return (1);
1010 }
1011 #endif