FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sx.c


    1 /*-
    2  * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
    3  * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice(s), this list of conditions and the following disclaimer as
   11  *    the first lines of this file unmodified other than the possible
   12  *    addition of one or more copyright notices.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice(s), this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
   18  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   19  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   20  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
   21  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   22  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   23  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
   24  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
   27  * DAMAGE.
   28  */
   29 
   30 /*
   31  * Shared/exclusive locks.  This implementation attempts to ensure
   32  * deterministic lock granting behavior, so that slocks and xlocks are
   33  * interleaved.
   34  *
   35  * Priority propagation will not generally raise the priority of lock holders,
   36  * so should not be relied upon in combination with sx locks.
   37  */
   38 
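Before the implementation details, a minimal usage sketch of the sx(9) API
this file implements.  The names example_sx and example_data are
illustrative, not part of this file; sx_init() and friends are the public
wrappers declared in sys/sx.h.

        #include <sys/param.h>
        #include <sys/lock.h>
        #include <sys/sx.h>

        static struct sx example_sx;
        static int example_data;

        static void
        example(void)
        {
                int v;

                sx_init(&example_sx, "example");  /* once, at setup time */

                sx_slock(&example_sx);            /* readers may run concurrently */
                v = example_data;
                sx_sunlock(&example_sx);

                sx_xlock(&example_sx);            /* writers are exclusive */
                example_data = v + 1;
                sx_xunlock(&example_sx);

                sx_destroy(&example_sx);          /* once, at teardown time */
        }
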
   39 #include "opt_ddb.h"
   40 #include "opt_hwpmc_hooks.h"
   41 #include "opt_kdtrace.h"
   42 #include "opt_no_adaptive_sx.h"
   43 
   44 #include <sys/cdefs.h>
   45 __FBSDID("$FreeBSD$");
   46 
   47 #include <sys/param.h>
   48 #include <sys/systm.h>
   49 #include <sys/kdb.h>
   50 #include <sys/ktr.h>
   51 #include <sys/lock.h>
   52 #include <sys/mutex.h>
   53 #include <sys/proc.h>
   54 #include <sys/sleepqueue.h>
   55 #include <sys/sx.h>
   56 #include <sys/sysctl.h>
   57 
   58 #if defined(SMP) && !defined(NO_ADAPTIVE_SX)
   59 #include <machine/cpu.h>
   60 #endif
   61 
   62 #ifdef DDB
   63 #include <ddb/ddb.h>
   64 #endif
   65 
   66 #if defined(SMP) && !defined(NO_ADAPTIVE_SX)
   67 #define ADAPTIVE_SX
   68 #endif
   69 
   70 CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);
   71 
   72 #ifdef HWPMC_HOOKS
   73 #include <sys/pmckern.h>
   74 PMC_SOFT_DECLARE( , , lock, failed);
   75 #endif
   76 
   77 /* Handy macros for sleep queues. */
   78 #define SQ_EXCLUSIVE_QUEUE      0
   79 #define SQ_SHARED_QUEUE         1
   80 
   81 #ifdef ADAPTIVE_SX
   82 #define ASX_RETRIES             10
   83 #define ASX_LOOPS               10000
   84 #endif
   85 
   86 /*
   87  * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
   88  * drop Giant anytime we have to sleep or if we adaptively spin.
   89  */
   90 #define GIANT_DECLARE                                                   \
   91         int _giantcnt = 0;                                              \
   92         WITNESS_SAVE_DECL(Giant)                                        \
   93 
   94 #define GIANT_SAVE() do {                                               \
   95         if (mtx_owned(&Giant)) {                                        \
   96                 WITNESS_SAVE(&Giant.lock_object, Giant);                \
   97                 while (mtx_owned(&Giant)) {                             \
   98                         _giantcnt++;                                    \
   99                         mtx_unlock(&Giant);                             \
  100                 }                                                       \
  101         }                                                               \
  102 } while (0)
  103 
  104 #define GIANT_RESTORE() do {                                            \
  105         if (_giantcnt > 0) {                                            \
  106                 mtx_assert(&Giant, MA_NOTOWNED);                        \
  107                 while (_giantcnt--)                                     \
  108                         mtx_lock(&Giant);                               \
  109                 WITNESS_RESTORE(&Giant.lock_object, Giant);             \
  110         }                                                               \
  111 } while (0)
  112 
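The hard-path functions later in this file wrap every sleep or spin in
these macros.  A minimal sketch of the intended pattern, assuming it lives
in this file (the macros are file-local) and using pause(9) as a stand-in
for the real sleep; the function name is illustrative:

        static void
        example_sleep_without_giant(void)
        {
                GIANT_DECLARE;

                GIANT_SAVE();           /* fully release Giant, recording the depth */
                pause("xmplgnt", hz);   /* safe to sleep: Giant is not held */
                GIANT_RESTORE();        /* reacquire Giant to the recorded depth */
        }
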
  113 /*
  114  * Returns true if an exclusive lock is recursed.  It assumes
  115  * curthread currently has an exclusive lock.
  116  */
  117 #define sx_recurse              lock_object.lo_data
  118 #define sx_recursed(sx)         ((sx)->sx_recurse != 0)
  119 
  120 static void     assert_sx(struct lock_object *lock, int what);
  121 #ifdef DDB
  122 static void     db_show_sx(struct lock_object *lock);
  123 #endif
  124 static void     lock_sx(struct lock_object *lock, int how);
  125 #ifdef KDTRACE_HOOKS
  126 static int      owner_sx(struct lock_object *lock, struct thread **owner);
  127 #endif
  128 static int      unlock_sx(struct lock_object *lock);
  129 
  130 struct lock_class lock_class_sx = {
  131         .lc_name = "sx",
  132         .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
  133         .lc_assert = assert_sx,
  134 #ifdef DDB
  135         .lc_ddb_show = db_show_sx,
  136 #endif
  137         .lc_lock = lock_sx,
  138         .lc_unlock = unlock_sx,
  139 #ifdef KDTRACE_HOOKS
  140         .lc_owner = owner_sx,
  141 #endif
  142 };
  143 
  144 #ifndef INVARIANTS
  145 #define _sx_assert(sx, what, file, line)
  146 #endif
  147 
  148 void
  149 assert_sx(struct lock_object *lock, int what)
  150 {
  151 
  152         sx_assert((struct sx *)lock, what);
  153 }
  154 
  155 void
  156 lock_sx(struct lock_object *lock, int how)
  157 {
  158         struct sx *sx;
  159 
  160         sx = (struct sx *)lock;
  161         if (how)
  162                 sx_xlock(sx);
  163         else
  164                 sx_slock(sx);
  165 }
  166 
  167 int
  168 unlock_sx(struct lock_object *lock)
  169 {
  170         struct sx *sx;
  171 
  172         sx = (struct sx *)lock;
  173         sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
  174         if (sx_xlocked(sx)) {
  175                 sx_xunlock(sx);
  176                 return (1);
  177         } else {
  178                 sx_sunlock(sx);
  179                 return (0);
  180         }
  181 }
  182 
  183 #ifdef KDTRACE_HOOKS
  184 int
  185 owner_sx(struct lock_object *lock, struct thread **owner)
  186 {
  187         struct sx *sx = (struct sx *)lock;
  188         uintptr_t x = sx->sx_lock;
  189 
  190         *owner = (struct thread *)SX_OWNER(x);
  191         return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
  192             (*owner != NULL));
  193 }
  194 #endif
  195 
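owner_sx() above shows the central encoding trick: the entire lock state
lives in the single word sx->sx_lock.  A hedged sketch of decoding that
word with the sys/sx.h macros; the function name is illustrative and
printf() relies on the includes already present in this file:

        static void
        example_describe(struct sx *sx)
        {
                uintptr_t x = sx->sx_lock;

                if (x & SX_LOCK_SHARED) {
                        /* Shared: the upper bits count the sharers. */
                        printf("shared by %ju thread(s)\n",
                            (uintmax_t)SX_SHARERS(x));
                } else {
                        /*
                         * Exclusive: the word holds the owning thread
                         * pointer; SX_OWNER() masks off the flag bits.
                         */
                        printf("owned by thread %p\n", (void *)SX_OWNER(x));
                }
        }
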
  196 void
  197 sx_sysinit(void *arg)
  198 {
  199         struct sx_args *sargs = arg;
  200 
  201         sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
  202 }
  203 
  204 void
  205 sx_init_flags(struct sx *sx, const char *description, int opts)
  206 {
  207         int flags;
  208 
  209         MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
  210             SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
  211         ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
  212             ("%s: sx_lock not aligned for %s: %p", __func__, description,
  213             &sx->sx_lock));
  214 
  215         flags = LO_SLEEPABLE | LO_UPGRADABLE;
  216         if (opts & SX_DUPOK)
  217                 flags |= LO_DUPOK;
  218         if (opts & SX_NOPROFILE)
  219                 flags |= LO_NOPROFILE;
  220         if (!(opts & SX_NOWITNESS))
  221                 flags |= LO_WITNESS;
  222         if (opts & SX_RECURSE)
  223                 flags |= LO_RECURSABLE;
  224         if (opts & SX_QUIET)
  225                 flags |= LO_QUIET;
  226 
  227         flags |= opts & SX_NOADAPTIVE;
  228         sx->sx_lock = SX_LOCK_UNLOCKED;
  229         sx->sx_recurse = 0;
  230         lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
  231 }
  232 
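A short sketch of initialization with options; cfg_lock and example_setup
are illustrative names, not part of this file:

        static struct sx cfg_lock;

        static void
        example_setup(void)
        {
                /* Recursion must be requested explicitly with SX_RECURSE. */
                sx_init_flags(&cfg_lock, "cfg", SX_RECURSE | SX_QUIET);
        }
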
  233 void
  234 sx_destroy(struct sx *sx)
  235 {
  236 
  237         KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
  238         KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
  239         sx->sx_lock = SX_LOCK_DESTROYED;
  240         lock_destroy(&sx->lock_object);
  241 }
  242 
  243 int
  244 _sx_slock(struct sx *sx, int opts, const char *file, int line)
  245 {
  246         int error = 0;
  247 
  248         if (SCHEDULER_STOPPED())
  249                 return (0);
  250         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
  251             ("sx_slock() by idle thread %p on sx %s @ %s:%d",
  252             curthread, sx->lock_object.lo_name, file, line));
  253         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  254             ("sx_slock() of destroyed sx @ %s:%d", file, line));
  255         WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
  256         error = __sx_slock(sx, opts, file, line);
  257         if (!error) {
  258                 LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
  259                 WITNESS_LOCK(&sx->lock_object, 0, file, line);
  260                 curthread->td_locks++;
  261         }
  262 
  263         return (error);
  264 }
  265 
  266 int
  267 _sx_try_slock(struct sx *sx, const char *file, int line)
  268 {
  269         uintptr_t x;
  270 
  271         if (SCHEDULER_STOPPED())
  272                 return (1);
  273 
  274         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
  275             ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
  276             curthread, sx->lock_object.lo_name, file, line));
  277 
  278         for (;;) {
  279                 x = sx->sx_lock;
  280                 KASSERT(x != SX_LOCK_DESTROYED,
  281                     ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
  282                 if (!(x & SX_LOCK_SHARED))
  283                         break;
  284                 if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
  285                         LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
  286                         WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
  287                         LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE,
  288                             sx, 0, 0, file, line);
  289                         curthread->td_locks++;
  290                         return (1);
  291                 }
  292         }
  293 
  294         LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
  295         return (0);
  296 }
  297 
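A sketch of the usual try-lock pattern built on sx_try_slock(), reusing
the illustrative example_sx and example_data from the sketch near the top
of the file:

        static int
        example_try_read(int *vp)
        {
                if (!sx_try_slock(&example_sx))
                        return (0);     /* contended; caller may fall back */
                *vp = example_data;
                sx_sunlock(&example_sx);
                return (1);
        }
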
  298 int
  299 _sx_xlock(struct sx *sx, int opts, const char *file, int line)
  300 {
  301         int error = 0;
  302 
  303         if (SCHEDULER_STOPPED())
  304                 return (0);
  305         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
  306             ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
  307             curthread, sx->lock_object.lo_name, file, line));
  308         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  309             ("sx_xlock() of destroyed sx @ %s:%d", file, line));
  310         WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
  311             line, NULL);
  312         error = __sx_xlock(sx, curthread, opts, file, line);
  313         if (!error) {
  314                 LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
  315                     file, line);
  316                 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
  317                 curthread->td_locks++;
  318         }
  319 
  320         return (error);
  321 }
  322 
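The opts argument threaded through here carries SX_INTERRUPTIBLE, which
the sx_xlock_sig()/sx_slock_sig() wrappers in sys/sx.h pass so that the
sleep can be broken by a signal.  A sketch of the resulting error-handling
pattern, reusing the illustrative names:

        static int
        example_write(int v)
        {
                int error;

                error = sx_xlock_sig(&example_sx);
                if (error != 0)
                        return (error); /* sleep interrupted by a signal */
                example_data = v;
                sx_xunlock(&example_sx);
                return (0);
        }
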
  323 int
  324 _sx_try_xlock(struct sx *sx, const char *file, int line)
  325 {
  326         int rval;
  327 
  328         if (SCHEDULER_STOPPED())
  329                 return (1);
  330 
  331         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
  332             ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
  333             curthread, sx->lock_object.lo_name, file, line));
  334         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  335             ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
  336 
  337         if (sx_xlocked(sx) &&
  338             (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
  339                 sx->sx_recurse++;
  340                 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
  341                 rval = 1;
  342         } else
  343                 rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
  344                     (uintptr_t)curthread);
  345         LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
  346         if (rval) {
  347                 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
  348                     file, line);
  349                 if (!sx_recursed(sx))
  350                         LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE,
  351                             sx, 0, 0, file, line);
  352                 curthread->td_locks++;
  353         }
  354 
  355         return (rval);
  356 }
  357 
  358 void
  359 _sx_sunlock(struct sx *sx, const char *file, int line)
  360 {
  361 
  362         if (SCHEDULER_STOPPED())
  363                 return;
  364         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  365             ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
  366         _sx_assert(sx, SA_SLOCKED, file, line);
  367         curthread->td_locks--;
  368         WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
  369         LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
  370         __sx_sunlock(sx, file, line);
  371         LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_SUNLOCK_RELEASE, sx);
  372 }
  373 
  374 void
  375 _sx_xunlock(struct sx *sx, const char *file, int line)
  376 {
  377 
  378         if (SCHEDULER_STOPPED())
  379                 return;
  380         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  381             ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
  382         _sx_assert(sx, SA_XLOCKED, file, line);
  383         curthread->td_locks--;
  384         WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
  385         LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
  386             line);
  387         if (!sx_recursed(sx))
  388                 LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_XUNLOCK_RELEASE, sx);
  389         __sx_xunlock(sx, curthread, file, line);
  390 }
  391 
  392 /*
  393  * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
  394  * This will only succeed if this thread holds a single shared lock.
   395  * Return 1 if the upgrade succeeds, 0 otherwise.
  396  */
  397 int
  398 _sx_try_upgrade(struct sx *sx, const char *file, int line)
  399 {
  400         uintptr_t x;
  401         int success;
  402 
  403         if (SCHEDULER_STOPPED())
  404                 return (1);
  405 
  406         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  407             ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
  408         _sx_assert(sx, SA_SLOCKED, file, line);
  409 
  410         /*
  411          * Try to switch from one shared lock to an exclusive lock.  We need
  412          * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
  413          * we will wake up the exclusive waiters when we drop the lock.
  414          */
  415         x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
  416         success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
  417             (uintptr_t)curthread | x);
  418         LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
  419         if (success) {
  420                 WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
  421                     file, line);
  422                 LOCKSTAT_RECORD0(LS_SX_TRYUPGRADE_UPGRADE, sx);
  423         }
  424         return (success);
  425 }
  426 
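A sketch of the canonical read-then-upgrade pattern this function enables.
If the upgrade fails, the shared lock must be dropped, the exclusive lock
acquired, and the earlier read revalidated, since other threads may have
intervened.  Names reuse the illustrative example_sx and example_data:

        static void
        example_read_then_maybe_write(void)
        {
                sx_slock(&example_sx);
                if (example_data == 0) {
                        sx_sunlock(&example_sx);
                        return;
                }
                if (!sx_try_upgrade(&example_sx)) {
                        /* Lost the race: reacquire and revalidate. */
                        sx_sunlock(&example_sx);
                        sx_xlock(&example_sx);
                        if (example_data == 0) {
                                sx_xunlock(&example_sx);
                                return;
                        }
                }
                example_data = 0;       /* now held exclusively */
                sx_xunlock(&example_sx);
        }
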
  427 /*
  428  * Downgrade an unrecursed exclusive lock into a single shared lock.
  429  */
  430 void
  431 _sx_downgrade(struct sx *sx, const char *file, int line)
  432 {
  433         uintptr_t x;
  434         int wakeup_swapper;
  435 
  436         if (SCHEDULER_STOPPED())
  437                 return;
  438 
  439         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  440             ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
  441         _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
  442 #ifndef INVARIANTS
  443         if (sx_recursed(sx))
  444                 panic("downgrade of a recursed lock");
  445 #endif
  446 
  447         WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);
  448 
  449         /*
  450          * Try to switch from an exclusive lock with no shared waiters
  451          * to one sharer with no shared waiters.  If there are
  452          * exclusive waiters, we don't need to lock the sleep queue so
  453          * long as we preserve the flag.  We do one quick try and if
  454          * that fails we grab the sleepq lock to keep the flags from
  455          * changing and do it the slow way.
  456          *
  457          * We have to lock the sleep queue if there are shared waiters
  458          * so we can wake them up.
  459          */
  460         x = sx->sx_lock;
  461         if (!(x & SX_LOCK_SHARED_WAITERS) &&
  462             atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
  463             (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
  464                 LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
  465                 return;
  466         }
  467 
  468         /*
  469          * Lock the sleep queue so we can read the waiters bits
  470          * without any races and wakeup any shared waiters.
  471          */
  472         sleepq_lock(&sx->lock_object);
  473 
  474         /*
  475          * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
  476          * shared lock.  If there are any shared waiters, wake them up.
  477          */
  478         wakeup_swapper = 0;
  479         x = sx->sx_lock;
  480         atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
  481             (x & SX_LOCK_EXCLUSIVE_WAITERS));
  482         if (x & SX_LOCK_SHARED_WAITERS)
  483                 wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
  484                     0, SQ_SHARED_QUEUE);
  485         sleepq_release(&sx->lock_object);
  486 
  487         LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
  488         LOCKSTAT_RECORD0(LS_SX_DOWNGRADE_DOWNGRADE, sx);
  489 
  490         if (wakeup_swapper)
  491                 kick_proc0();
  492 }
  493 
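A sketch of the matching write-then-downgrade pattern: finish the
exclusive phase, then keep reading without blocking other readers.  Names
reuse the illustrative example_sx and example_data:

        static void
        example_update_then_read(void)
        {
                sx_xlock(&example_sx);
                example_data++;                 /* exclusive phase */
                sx_downgrade(&example_sx);      /* atomically become a reader */
                (void)example_data;             /* shared phase: readers may join */
                sx_sunlock(&example_sx);
        }
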
  494 /*
  495  * This function represents the so-called 'hard case' for sx_xlock
  496  * operation.  All 'easy case' failures are redirected to this.  Note
  497  * that ideally this would be a static function, but it needs to be
  498  * accessible from at least sx.h.
  499  */
  500 int
  501 _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
  502     int line)
  503 {
  504         GIANT_DECLARE;
  505 #ifdef ADAPTIVE_SX
  506         volatile struct thread *owner;
  507         u_int i, spintries = 0;
  508 #endif
  509         uintptr_t x;
  510 #ifdef LOCK_PROFILING
  511         uint64_t waittime = 0;
  512         int contested = 0;
  513 #endif
  514         int error = 0;
  515 #ifdef  KDTRACE_HOOKS
  516         uintptr_t state;
  517         uint64_t spin_cnt = 0;
  518         uint64_t sleep_cnt = 0;
  519         int64_t sleep_time = 0;
  520         int64_t all_time = 0;
  521 #endif
  522 
  523         if (SCHEDULER_STOPPED())
  524                 return (0);
  525 
  526         /* If we already hold an exclusive lock, then recurse. */
  527         if (sx_xlocked(sx)) {
  528                 KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
  529             ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
  530                     sx->lock_object.lo_name, file, line));
  531                 sx->sx_recurse++;
  532                 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
  533                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  534                         CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
  535                 return (0);
  536         }
  537 
  538         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  539                 CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
  540                     sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
  541 
  542 #ifdef KDTRACE_HOOKS
  543         all_time -= lockstat_nsecs(&sx->lock_object);
  544         state = sx->sx_lock;
  545 #endif
  546         while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
  547 #ifdef KDTRACE_HOOKS
  548                 spin_cnt++;
  549 #endif
  550 #ifdef HWPMC_HOOKS
  551                 PMC_SOFT_CALL( , , lock, failed);
  552 #endif
  553                 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
  554                     &waittime);
  555 #ifdef ADAPTIVE_SX
  556                 /*
  557                  * If the lock is write locked and the owner is
  558                  * running on another CPU, spin until the owner stops
  559                  * running or the state of the lock changes.
  560                  */
  561                 x = sx->sx_lock;
  562                 if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
  563                         if ((x & SX_LOCK_SHARED) == 0) {
  564                                 x = SX_OWNER(x);
  565                                 owner = (struct thread *)x;
  566                                 if (TD_IS_RUNNING(owner)) {
  567                                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  568                                                 CTR3(KTR_LOCK,
  569                                             "%s: spinning on %p held by %p",
  570                                                     __func__, sx, owner);
  571                                         GIANT_SAVE();
  572                                         while (SX_OWNER(sx->sx_lock) == x &&
  573                                             TD_IS_RUNNING(owner)) {
  574                                                 cpu_spinwait();
  575 #ifdef KDTRACE_HOOKS
  576                                                 spin_cnt++;
  577 #endif
  578                                         }
  579                                         continue;
  580                                 }
  581                         } else if (SX_SHARERS(x) && spintries < ASX_RETRIES) {
  582                                 GIANT_SAVE();
  583                                 spintries++;
  584                                 for (i = 0; i < ASX_LOOPS; i++) {
  585                                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  586                                                 CTR4(KTR_LOCK,
  587                                     "%s: shared spinning on %p with %u and %u",
  588                                                     __func__, sx, spintries, i);
  589                                         x = sx->sx_lock;
  590                                         if ((x & SX_LOCK_SHARED) == 0 ||
  591                                             SX_SHARERS(x) == 0)
  592                                                 break;
  593                                         cpu_spinwait();
  594 #ifdef KDTRACE_HOOKS
  595                                         spin_cnt++;
  596 #endif
  597                                 }
  598                                 if (i != ASX_LOOPS)
  599                                         continue;
  600                         }
  601                 }
  602 #endif
  603 
  604                 sleepq_lock(&sx->lock_object);
  605                 x = sx->sx_lock;
  606 
  607                 /*
  608                  * If the lock was released while spinning on the
  609                  * sleep queue chain lock, try again.
  610                  */
  611                 if (x == SX_LOCK_UNLOCKED) {
  612                         sleepq_release(&sx->lock_object);
  613                         continue;
  614                 }
  615 
  616 #ifdef ADAPTIVE_SX
  617                 /*
  618                  * The current lock owner might have started executing
  619                  * on another CPU (or the lock could have changed
  620                  * owners) while we were waiting on the sleep queue
  621                  * chain lock.  If so, drop the sleep queue lock and try
  622                  * again.
  623                  */
  624                 if (!(x & SX_LOCK_SHARED) &&
  625                     (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
  626                         owner = (struct thread *)SX_OWNER(x);
  627                         if (TD_IS_RUNNING(owner)) {
  628                                 sleepq_release(&sx->lock_object);
  629                                 continue;
  630                         }
  631                 }
  632 #endif
  633 
  634                 /*
  635                  * If an exclusive lock was released with both shared
  636                  * and exclusive waiters and a shared waiter hasn't
  637                  * woken up and acquired the lock yet, sx_lock will be
  638                  * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
  639                  * If we see that value, try to acquire it once.  Note
  640                  * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
   641                  * since other exclusive waiters may remain.  If we
  642                  * fail, restart the loop.
  643                  */
  644                 if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
  645                         if (atomic_cmpset_acq_ptr(&sx->sx_lock,
  646                             SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
  647                             tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
  648                                 sleepq_release(&sx->lock_object);
  649                                 CTR2(KTR_LOCK, "%s: %p claimed by new writer",
  650                                     __func__, sx);
  651                                 break;
  652                         }
  653                         sleepq_release(&sx->lock_object);
  654                         continue;
  655                 }
  656 
  657                 /*
   658                  * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we
   659                  * fail, loop back and retry.
  660                  */
  661                 if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
  662                         if (!atomic_cmpset_ptr(&sx->sx_lock, x,
  663                             x | SX_LOCK_EXCLUSIVE_WAITERS)) {
  664                                 sleepq_release(&sx->lock_object);
  665                                 continue;
  666                         }
  667                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  668                                 CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
  669                                     __func__, sx);
  670                 }
  671 
  672                 /*
  673                  * Since we have been unable to acquire the exclusive
  674                  * lock and the exclusive waiters flag is set, we have
  675                  * to sleep.
  676                  */
  677                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  678                         CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
  679                             __func__, sx);
  680 
  681 #ifdef KDTRACE_HOOKS
  682                 sleep_time -= lockstat_nsecs(&sx->lock_object);
  683 #endif
  684                 GIANT_SAVE();
  685                 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
  686                     SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
  687                     SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
  688                 if (!(opts & SX_INTERRUPTIBLE))
  689                         sleepq_wait(&sx->lock_object, 0);
  690                 else
  691                         error = sleepq_wait_sig(&sx->lock_object, 0);
  692 #ifdef KDTRACE_HOOKS
  693                 sleep_time += lockstat_nsecs(&sx->lock_object);
  694                 sleep_cnt++;
  695 #endif
  696                 if (error) {
  697                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  698                                 CTR2(KTR_LOCK,
  699                         "%s: interruptible sleep by %p suspended by signal",
  700                                     __func__, sx);
  701                         break;
  702                 }
  703                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  704                         CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
  705                             __func__, sx);
  706         }
  707 #ifdef KDTRACE_HOOKS
  708         all_time += lockstat_nsecs(&sx->lock_object);
  709         if (sleep_time)
  710                 LOCKSTAT_RECORD4(LS_SX_XLOCK_BLOCK, sx, sleep_time,
  711                     LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
  712                     (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
  713         if (spin_cnt > sleep_cnt)
  714                 LOCKSTAT_RECORD4(LS_SX_XLOCK_SPIN, sx, all_time - sleep_time,
  715                     LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
  716                     (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
  717 #endif
  718         if (!error)
  719                 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE, sx,
  720                     contested, waittime, file, line);
  721         GIANT_RESTORE();
  722         return (error);
  723 }
  724 
  725 /*
  726  * This function represents the so-called 'hard case' for sx_xunlock
  727  * operation.  All 'easy case' failures are redirected to this.  Note
  728  * that ideally this would be a static function, but it needs to be
  729  * accessible from at least sx.h.
  730  */
  731 void
  732 _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
  733 {
  734         uintptr_t x;
  735         int queue, wakeup_swapper;
  736 
  737         if (SCHEDULER_STOPPED())
  738                 return;
  739 
  740         MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
  741 
  742         /* If the lock is recursed, then unrecurse one level. */
  743         if (sx_xlocked(sx) && sx_recursed(sx)) {
  744                 if ((--sx->sx_recurse) == 0)
  745                         atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
  746                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  747                         CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
  748                 return;
  749         }
  750         MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
  751             SX_LOCK_EXCLUSIVE_WAITERS));
  752         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  753                 CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);
  754 
  755         sleepq_lock(&sx->lock_object);
  756         x = SX_LOCK_UNLOCKED;
  757 
  758         /*
  759          * The wake up algorithm here is quite simple and probably not
  760          * ideal.  It gives precedence to shared waiters if they are
  761          * present.  For this condition, we have to preserve the
  762          * state of the exclusive waiters flag.
   763          * If interruptible sleeps left the shared queue empty, avoid
   764          * starving the threads sleeping on the exclusive queue by giving
   765          * them precedence and clearing the shared waiters bit anyway.
  766          */
  767         if ((sx->sx_lock & SX_LOCK_SHARED_WAITERS) != 0 &&
  768             sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
  769                 queue = SQ_SHARED_QUEUE;
  770                 x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
  771         } else
  772                 queue = SQ_EXCLUSIVE_QUEUE;
  773 
  774         /* Wake up all the waiters for the specific queue. */
  775         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  776                 CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
  777                     __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
  778                     "exclusive");
  779         atomic_store_rel_ptr(&sx->sx_lock, x);
  780         wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
  781             queue);
  782         sleepq_release(&sx->lock_object);
  783         if (wakeup_swapper)
  784                 kick_proc0();
  785 }
  786 
  787 /*
  788  * This function represents the so-called 'hard case' for sx_slock
  789  * operation.  All 'easy case' failures are redirected to this.  Note
  790  * that ideally this would be a static function, but it needs to be
  791  * accessible from at least sx.h.
  792  */
  793 int
  794 _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
  795 {
  796         GIANT_DECLARE;
  797 #ifdef ADAPTIVE_SX
  798         volatile struct thread *owner;
  799 #endif
  800 #ifdef LOCK_PROFILING
  801         uint64_t waittime = 0;
  802         int contested = 0;
  803 #endif
  804         uintptr_t x;
  805         int error = 0;
  806 #ifdef KDTRACE_HOOKS
  807         uintptr_t state;
  808         uint64_t spin_cnt = 0;
  809         uint64_t sleep_cnt = 0;
  810         int64_t sleep_time = 0;
  811         int64_t all_time = 0;
  812 #endif
  813 
  814         if (SCHEDULER_STOPPED())
  815                 return (0);
  816 
  817 #ifdef KDTRACE_HOOKS
  818         state = sx->sx_lock;
  819         all_time -= lockstat_nsecs(&sx->lock_object);
  820 #endif
  821 
  822         /*
  823          * As with rwlocks, we don't make any attempt to try to block
  824          * shared locks once there is an exclusive waiter.
  825          */
  826         for (;;) {
  827 #ifdef KDTRACE_HOOKS
  828                 spin_cnt++;
  829 #endif
  830                 x = sx->sx_lock;
  831 
  832                 /*
  833                  * If no other thread has an exclusive lock then try to bump up
  834                  * the count of sharers.  Since we have to preserve the state
  835                  * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
   836                  * shared lock, loop back and retry.
  837                  */
  838                 if (x & SX_LOCK_SHARED) {
  839                         MPASS(!(x & SX_LOCK_SHARED_WAITERS));
  840                         if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
  841                             x + SX_ONE_SHARER)) {
  842                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  843                                         CTR4(KTR_LOCK,
  844                                             "%s: %p succeed %p -> %p", __func__,
  845                                             sx, (void *)x,
  846                                             (void *)(x + SX_ONE_SHARER));
  847                                 break;
  848                         }
  849                         continue;
  850                 }
  851 #ifdef HWPMC_HOOKS
  852                 PMC_SOFT_CALL( , , lock, failed);
  853 #endif
  854                 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
  855                     &waittime);
  856 
  857 #ifdef ADAPTIVE_SX
  858                 /*
  859                  * If the owner is running on another CPU, spin until
  860                  * the owner stops running or the state of the lock
  861                  * changes.
  862                  */
  863                 if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
  864                         x = SX_OWNER(x);
  865                         owner = (struct thread *)x;
  866                         if (TD_IS_RUNNING(owner)) {
  867                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  868                                         CTR3(KTR_LOCK,
  869                                             "%s: spinning on %p held by %p",
  870                                             __func__, sx, owner);
  871                                 GIANT_SAVE();
  872                                 while (SX_OWNER(sx->sx_lock) == x &&
  873                                     TD_IS_RUNNING(owner)) {
  874 #ifdef KDTRACE_HOOKS
  875                                         spin_cnt++;
  876 #endif
  877                                         cpu_spinwait();
  878                                 }
  879                                 continue;
  880                         }
  881                 }
  882 #endif
  883 
  884                 /*
  885                  * Some other thread already has an exclusive lock, so
  886                  * start the process of blocking.
  887                  */
  888                 sleepq_lock(&sx->lock_object);
  889                 x = sx->sx_lock;
  890 
  891                 /*
  892                  * The lock could have been released while we spun.
  893                  * In this case loop back and retry.
  894                  */
  895                 if (x & SX_LOCK_SHARED) {
  896                         sleepq_release(&sx->lock_object);
  897                         continue;
  898                 }
  899 
  900 #ifdef ADAPTIVE_SX
  901                 /*
  902                  * If the owner is running on another CPU, spin until
  903                  * the owner stops running or the state of the lock
  904                  * changes.
  905                  */
  906                 if (!(x & SX_LOCK_SHARED) &&
  907                     (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
  908                         owner = (struct thread *)SX_OWNER(x);
  909                         if (TD_IS_RUNNING(owner)) {
  910                                 sleepq_release(&sx->lock_object);
  911                                 continue;
  912                         }
  913                 }
  914 #endif
  915 
  916                 /*
  917                  * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
   918                  * fail to set it, drop the sleep queue lock and loop
  919                  * back.
  920                  */
  921                 if (!(x & SX_LOCK_SHARED_WAITERS)) {
  922                         if (!atomic_cmpset_ptr(&sx->sx_lock, x,
  923                             x | SX_LOCK_SHARED_WAITERS)) {
  924                                 sleepq_release(&sx->lock_object);
  925                                 continue;
  926                         }
  927                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  928                                 CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
  929                                     __func__, sx);
  930                 }
  931 
  932                 /*
  933                  * Since we have been unable to acquire the shared lock,
  934                  * we have to sleep.
  935                  */
  936                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  937                         CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
  938                             __func__, sx);
  939 
  940 #ifdef KDTRACE_HOOKS
  941                 sleep_time -= lockstat_nsecs(&sx->lock_object);
  942 #endif
  943                 GIANT_SAVE();
  944                 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
  945                     SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
  946                     SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
  947                 if (!(opts & SX_INTERRUPTIBLE))
  948                         sleepq_wait(&sx->lock_object, 0);
  949                 else
  950                         error = sleepq_wait_sig(&sx->lock_object, 0);
  951 #ifdef KDTRACE_HOOKS
  952                 sleep_time += lockstat_nsecs(&sx->lock_object);
  953                 sleep_cnt++;
  954 #endif
  955                 if (error) {
  956                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  957                                 CTR2(KTR_LOCK,
  958                         "%s: interruptible sleep by %p suspended by signal",
  959                                     __func__, sx);
  960                         break;
  961                 }
  962                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  963                         CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
  964                             __func__, sx);
  965         }
  966 #ifdef KDTRACE_HOOKS
  967         all_time += lockstat_nsecs(&sx->lock_object);
  968         if (sleep_time)
  969                 LOCKSTAT_RECORD4(LS_SX_SLOCK_BLOCK, sx, sleep_time,
  970                     LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
  971                     (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
  972         if (spin_cnt > sleep_cnt)
  973                 LOCKSTAT_RECORD4(LS_SX_SLOCK_SPIN, sx, all_time - sleep_time,
  974                     LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
  975                     (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
  976 #endif
  977         if (error == 0)
  978                 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx,
  979                     contested, waittime, file, line);
  980         GIANT_RESTORE();
  981         return (error);
  982 }
  983 
  984 /*
  985  * This function represents the so-called 'hard case' for sx_sunlock
  986  * operation.  All 'easy case' failures are redirected to this.  Note
  987  * that ideally this would be a static function, but it needs to be
  988  * accessible from at least sx.h.
  989  */
  990 void
  991 _sx_sunlock_hard(struct sx *sx, const char *file, int line)
  992 {
  993         uintptr_t x;
  994         int wakeup_swapper;
  995 
  996         if (SCHEDULER_STOPPED())
  997                 return;
  998 
  999         for (;;) {
 1000                 x = sx->sx_lock;
 1001 
 1002                 /*
  1003                  * We should never have sharers waiting while at least one
  1004                  * thread holds a shared lock.
 1005                  */
 1006                 KASSERT(!(x & SX_LOCK_SHARED_WAITERS),
 1007                     ("%s: waiting sharers", __func__));
 1008 
 1009                 /*
 1010                  * See if there is more than one shared lock held.  If
 1011                  * so, just drop one and return.
 1012                  */
 1013                 if (SX_SHARERS(x) > 1) {
 1014                         if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
 1015                             x - SX_ONE_SHARER)) {
 1016                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
 1017                                         CTR4(KTR_LOCK,
 1018                                             "%s: %p succeeded %p -> %p",
 1019                                             __func__, sx, (void *)x,
 1020                                             (void *)(x - SX_ONE_SHARER));
 1021                                 break;
 1022                         }
 1023                         continue;
 1024                 }
 1025 
 1026                 /*
 1027                  * If there aren't any waiters for an exclusive lock,
 1028                  * then try to drop it quickly.
 1029                  */
 1030                 if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
 1031                         MPASS(x == SX_SHARERS_LOCK(1));
 1032                         if (atomic_cmpset_rel_ptr(&sx->sx_lock,
 1033                             SX_SHARERS_LOCK(1), SX_LOCK_UNLOCKED)) {
 1034                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
 1035                                         CTR2(KTR_LOCK, "%s: %p last succeeded",
 1036                                             __func__, sx);
 1037                                 break;
 1038                         }
 1039                         continue;
 1040                 }
 1041 
 1042                 /*
 1043                  * At this point, there should just be one sharer with
 1044                  * exclusive waiters.
 1045                  */
 1046                 MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));
 1047 
 1048                 sleepq_lock(&sx->lock_object);
 1049 
 1050                 /*
  1051                  * The wakeup semantics here are quite simple:
  1052                  * just wake up all the exclusive waiters.
  1053                  * Note that the state of the lock could have changed,
  1054                  * so if the compare-and-set fails, loop back and retry.
 1055                  */
 1056                 if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
 1057                     SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
 1058                     SX_LOCK_UNLOCKED)) {
 1059                         sleepq_release(&sx->lock_object);
 1060                         continue;
 1061                 }
 1062                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  1063                         CTR2(KTR_LOCK, "%s: %p waking up all threads on "
  1064                             "exclusive queue", __func__, sx);
 1065                 wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
 1066                     0, SQ_EXCLUSIVE_QUEUE);
 1067                 sleepq_release(&sx->lock_object);
 1068                 if (wakeup_swapper)
 1069                         kick_proc0();
 1070                 break;
 1071         }
 1072 }
 1073 
 1074 #ifdef INVARIANT_SUPPORT
 1075 #ifndef INVARIANTS
 1076 #undef  _sx_assert
 1077 #endif
 1078 
 1079 /*
 1080  * In the non-WITNESS case, sx_assert() can only detect that at least
 1081  * *some* thread owns an slock, but it cannot guarantee that *this*
 1082  * thread owns an slock.
 1083  */
 1084 void
 1085 _sx_assert(struct sx *sx, int what, const char *file, int line)
 1086 {
 1087 #ifndef WITNESS
 1088         int slocked = 0;
 1089 #endif
 1090 
 1091         if (panicstr != NULL)
 1092                 return;
 1093         switch (what) {
 1094         case SA_SLOCKED:
 1095         case SA_SLOCKED | SA_NOTRECURSED:
 1096         case SA_SLOCKED | SA_RECURSED:
 1097 #ifndef WITNESS
 1098                 slocked = 1;
 1099                 /* FALLTHROUGH */
 1100 #endif
 1101         case SA_LOCKED:
 1102         case SA_LOCKED | SA_NOTRECURSED:
 1103         case SA_LOCKED | SA_RECURSED:
 1104 #ifdef WITNESS
 1105                 witness_assert(&sx->lock_object, what, file, line);
 1106 #else
 1107                 /*
 1108                  * If some other thread has an exclusive lock or we
 1109                  * have one and are asserting a shared lock, fail.
 1110                  * Also, if no one has a lock at all, fail.
 1111                  */
 1112                 if (sx->sx_lock == SX_LOCK_UNLOCKED ||
 1113                     (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
 1114                     sx_xholder(sx) != curthread)))
 1115                         panic("Lock %s not %slocked @ %s:%d\n",
 1116                             sx->lock_object.lo_name, slocked ? "share " : "",
 1117                             file, line);
 1118 
 1119                 if (!(sx->sx_lock & SX_LOCK_SHARED)) {
 1120                         if (sx_recursed(sx)) {
 1121                                 if (what & SA_NOTRECURSED)
 1122                                         panic("Lock %s recursed @ %s:%d\n",
 1123                                             sx->lock_object.lo_name, file,
 1124                                             line);
 1125                         } else if (what & SA_RECURSED)
 1126                                 panic("Lock %s not recursed @ %s:%d\n",
 1127                                     sx->lock_object.lo_name, file, line);
 1128                 }
 1129 #endif
 1130                 break;
 1131         case SA_XLOCKED:
 1132         case SA_XLOCKED | SA_NOTRECURSED:
 1133         case SA_XLOCKED | SA_RECURSED:
 1134                 if (sx_xholder(sx) != curthread)
 1135                         panic("Lock %s not exclusively locked @ %s:%d\n",
 1136                             sx->lock_object.lo_name, file, line);
 1137                 if (sx_recursed(sx)) {
 1138                         if (what & SA_NOTRECURSED)
 1139                                 panic("Lock %s recursed @ %s:%d\n",
 1140                                     sx->lock_object.lo_name, file, line);
 1141                 } else if (what & SA_RECURSED)
 1142                         panic("Lock %s not recursed @ %s:%d\n",
 1143                             sx->lock_object.lo_name, file, line);
 1144                 break;
 1145         case SA_UNLOCKED:
 1146 #ifdef WITNESS
 1147                 witness_assert(&sx->lock_object, what, file, line);
 1148 #else
 1149                 /*
  1150                  * If we hold an exclusive lock, fail.  We can't
 1151                  * reliably check to see if we hold a shared lock or
 1152                  * not.
 1153                  */
 1154                 if (sx_xholder(sx) == curthread)
 1155                         panic("Lock %s exclusively locked @ %s:%d\n",
 1156                             sx->lock_object.lo_name, file, line);
 1157 #endif
 1158                 break;
 1159         default:
 1160                 panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
 1161                     line);
 1162         }
 1163 }
 1164 #endif  /* INVARIANT_SUPPORT */
 1165 
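A sketch of how callers typically use sx_assert() to document and enforce
a locking precondition (compiled away unless INVARIANTS is enabled).
Names reuse the illustrative example_sx and example_data:

        static void
        example_modify_locked(void)
        {
                sx_assert(&example_sx, SA_XLOCKED);     /* caller must hold it */
                example_data++;
        }
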
 1166 #ifdef DDB
 1167 static void
 1168 db_show_sx(struct lock_object *lock)
 1169 {
 1170         struct thread *td;
 1171         struct sx *sx;
 1172 
 1173         sx = (struct sx *)lock;
 1174 
 1175         db_printf(" state: ");
 1176         if (sx->sx_lock == SX_LOCK_UNLOCKED)
 1177                 db_printf("UNLOCKED\n");
 1178         else if (sx->sx_lock == SX_LOCK_DESTROYED) {
 1179                 db_printf("DESTROYED\n");
 1180                 return;
 1181         } else if (sx->sx_lock & SX_LOCK_SHARED)
 1182                 db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
 1183         else {
 1184                 td = sx_xholder(sx);
 1185                 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
 1186                     td->td_tid, td->td_proc->p_pid, td->td_name);
 1187                 if (sx_recursed(sx))
 1188                         db_printf(" recursed: %d\n", sx->sx_recurse);
 1189         }
 1190 
 1191         db_printf(" waiters: ");
 1192         switch(sx->sx_lock &
 1193             (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
 1194         case SX_LOCK_SHARED_WAITERS:
 1195                 db_printf("shared\n");
 1196                 break;
 1197         case SX_LOCK_EXCLUSIVE_WAITERS:
 1198                 db_printf("exclusive\n");
 1199                 break;
 1200         case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
 1201                 db_printf("exclusive and shared\n");
 1202                 break;
 1203         default:
 1204                 db_printf("none\n");
 1205         }
 1206 }
 1207 
 1208 /*
 1209  * Check to see if a thread that is blocked on a sleep queue is actually
 1210  * blocked on an sx lock.  If so, output some details and return true.
 1211  * If the lock has an exclusive owner, return that in *ownerp.
 1212  */
 1213 int
 1214 sx_chain(struct thread *td, struct thread **ownerp)
 1215 {
 1216         struct sx *sx;
 1217 
 1218         /*
 1219          * Check to see if this thread is blocked on an sx lock.
 1220          * First, we check the lock class.  If that is ok, then we
 1221          * compare the lock name against the wait message.
 1222          */
 1223         sx = td->td_wchan;
 1224         if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
 1225             sx->lock_object.lo_name != td->td_wmesg)
 1226                 return (0);
 1227 
 1228         /* We think we have an sx lock, so output some details. */
 1229         db_printf("blocked on sx \"%s\" ", td->td_wmesg);
 1230         *ownerp = sx_xholder(sx);
 1231         if (sx->sx_lock & SX_LOCK_SHARED)
 1232                 db_printf("SLOCK (count %ju)\n",
 1233                     (uintmax_t)SX_SHARERS(sx->sx_lock));
 1234         else
 1235                 db_printf("XLOCK\n");
 1236         return (1);
 1237 }
 1238 #endif

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.