FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sx.c


    1 /*-
    2  * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
    3  * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice(s), this list of conditions and the following disclaimer as
   11  *    the first lines of this file unmodified other than the possible
   12  *    addition of one or more copyright notices.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice(s), this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
   18  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   19  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   20  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
   21  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   22  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   23  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
   24  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
   27  * DAMAGE.
   28  */
   29 
   30 /*
   31  * Shared/exclusive locks.  This implementation attempts to ensure
   32  * deterministic lock granting behavior, so that slocks and xlocks are
   33  * interleaved.
   34  *
   35  * Priority propagation will not generally raise the priority of lock holders,
    36  * so it should not be relied upon in combination with sx locks.
   37  */
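
/*
 * An illustrative sketch, not part of this file: how a consumer typically
 * uses the shared/exclusive lock interface implemented below.  The
 * "example" names are hypothetical; only the sx_*() calls are real.
 */
#if 0
static struct sx example_lock;
static int example_value;

static void
example_setup(void)
{

        sx_init(&example_lock, "example lock");
}

static int
example_read(void)
{
        int v;

        sx_slock(&example_lock);        /* Many readers may hold this. */
        v = example_value;
        sx_sunlock(&example_lock);
        return (v);
}

static void
example_write(int v)
{

        sx_xlock(&example_lock);        /* A single writer at a time. */
        example_value = v;
        sx_xunlock(&example_lock);
}
#endif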
   38 
   39 #include "opt_ddb.h"
   40 #include "opt_hwpmc_hooks.h"
   41 #include "opt_kdtrace.h"
   42 #include "opt_no_adaptive_sx.h"
   43 
   44 #include <sys/cdefs.h>
   45 __FBSDID("$FreeBSD: releng/10.1/sys/kern/kern_sx.c 255788 2013-09-22 14:09:07Z davide $");
   46 
   47 #include <sys/param.h>
   48 #include <sys/systm.h>
   49 #include <sys/kdb.h>
   50 #include <sys/ktr.h>
   51 #include <sys/lock.h>
   52 #include <sys/mutex.h>
   53 #include <sys/proc.h>
   54 #include <sys/sleepqueue.h>
   55 #include <sys/sx.h>
   56 #include <sys/sysctl.h>
   57 
   58 #if defined(SMP) && !defined(NO_ADAPTIVE_SX)
   59 #include <machine/cpu.h>
   60 #endif
   61 
   62 #ifdef DDB
   63 #include <ddb/ddb.h>
   64 #endif
   65 
   66 #if defined(SMP) && !defined(NO_ADAPTIVE_SX)
   67 #define ADAPTIVE_SX
   68 #endif
   69 
   70 CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);
   71 
   72 #ifdef HWPMC_HOOKS
   73 #include <sys/pmckern.h>
   74 PMC_SOFT_DECLARE( , , lock, failed);
   75 #endif
   76 
   77 /* Handy macros for sleep queues. */
   78 #define SQ_EXCLUSIVE_QUEUE      0
   79 #define SQ_SHARED_QUEUE         1
   80 
   81 /*
   82  * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
   83  * drop Giant anytime we have to sleep or if we adaptively spin.
   84  */
   85 #define GIANT_DECLARE                                                   \
   86         int _giantcnt = 0;                                              \
   87         WITNESS_SAVE_DECL(Giant)                                        \
   88 
   89 #define GIANT_SAVE() do {                                               \
   90         if (mtx_owned(&Giant)) {                                        \
   91                 WITNESS_SAVE(&Giant.lock_object, Giant);                \
   92                 while (mtx_owned(&Giant)) {                             \
   93                         _giantcnt++;                                    \
   94                         mtx_unlock(&Giant);                             \
   95                 }                                                       \
   96         }                                                               \
   97 } while (0)
   98 
   99 #define GIANT_RESTORE() do {                                            \
  100         if (_giantcnt > 0) {                                            \
  101                 mtx_assert(&Giant, MA_NOTOWNED);                        \
  102                 while (_giantcnt--)                                     \
  103                         mtx_lock(&Giant);                               \
  104                 WITNESS_RESTORE(&Giant.lock_object, Giant);             \
  105         }                                                               \
  106 } while (0)
  107 
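/*
 * An illustrative sketch, not part of this file: the standard
 * DROP_GIANT()/PICKUP_GIANT() pattern that the macros above are modeled
 * on, bracketing a sleep so Giant is not held across it.  The names
 * below are hypothetical.
 */
#if 0
static void
example_sleep(void *ident)
{

        DROP_GIANT();
        tsleep(ident, 0, "exslp", hz);  /* Sleep without holding Giant. */
        PICKUP_GIANT();
}
#endif
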
  108 /*
  109  * Returns true if an exclusive lock is recursed.  It assumes
  110  * curthread currently has an exclusive lock.
  111  */
  112 #define sx_recurse              lock_object.lo_data
  113 #define sx_recursed(sx)         ((sx)->sx_recurse != 0)
  114 
  115 static void     assert_sx(const struct lock_object *lock, int what);
  116 #ifdef DDB
  117 static void     db_show_sx(const struct lock_object *lock);
  118 #endif
  119 static void     lock_sx(struct lock_object *lock, uintptr_t how);
  120 #ifdef KDTRACE_HOOKS
  121 static int      owner_sx(const struct lock_object *lock, struct thread **owner);
  122 #endif
  123 static uintptr_t unlock_sx(struct lock_object *lock);
  124 
  125 struct lock_class lock_class_sx = {
  126         .lc_name = "sx",
  127         .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
  128         .lc_assert = assert_sx,
  129 #ifdef DDB
  130         .lc_ddb_show = db_show_sx,
  131 #endif
  132         .lc_lock = lock_sx,
  133         .lc_unlock = unlock_sx,
  134 #ifdef KDTRACE_HOOKS
  135         .lc_owner = owner_sx,
  136 #endif
  137 };
  138 
  139 #ifndef INVARIANTS
  140 #define _sx_assert(sx, what, file, line)
  141 #endif
  142 
  143 #ifdef ADAPTIVE_SX
  144 static u_int asx_retries = 10;
  145 static u_int asx_loops = 10000;
  146 static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
  147 SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
  148 SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");
  149 #endif
  150 
  151 void
  152 assert_sx(const struct lock_object *lock, int what)
  153 {
  154 
  155         sx_assert((const struct sx *)lock, what);
  156 }
  157 
  158 void
  159 lock_sx(struct lock_object *lock, uintptr_t how)
  160 {
  161         struct sx *sx;
  162 
  163         sx = (struct sx *)lock;
  164         if (how)
  165                 sx_slock(sx);
  166         else
  167                 sx_xlock(sx);
  168 }
  169 
  170 uintptr_t
  171 unlock_sx(struct lock_object *lock)
  172 {
  173         struct sx *sx;
  174 
  175         sx = (struct sx *)lock;
  176         sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
  177         if (sx_xlocked(sx)) {
  178                 sx_xunlock(sx);
  179                 return (0);
  180         } else {
  181                 sx_sunlock(sx);
  182                 return (1);
  183         }
  184 }
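
/*
 * An illustrative sketch, not part of this file: lock_sx() and unlock_sx()
 * above are what lets the sleep machinery release and later re-acquire an
 * sx lock around a sleep, as reached through sx_sleep().  The names below
 * are hypothetical.
 */
#if 0
static int
example_wait(struct sx *lock, void *chan, int *flag)
{
        int error = 0;

        sx_xlock(lock);
        while (*flag == 0) {
                /* The lock is dropped while asleep and re-taken on wakeup. */
                error = sx_sleep(chan, lock, PCATCH, "exwait", 0);
                if (error != 0)
                        break;
        }
        sx_xunlock(lock);
        return (error);
}
#endif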
  185 
  186 #ifdef KDTRACE_HOOKS
  187 int
  188 owner_sx(const struct lock_object *lock, struct thread **owner)
  189 {
  190         const struct sx *sx = (const struct sx *)lock;
  191         uintptr_t x = sx->sx_lock;
  192 
  193         *owner = (struct thread *)SX_OWNER(x);
  194         return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
  195             (*owner != NULL));
  196 }
  197 #endif
  198 
  199 void
  200 sx_sysinit(void *arg)
  201 {
  202         struct sx_args *sargs = arg;
  203 
  204         sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
  205 }
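
/*
 * An illustrative sketch, not part of this file: sx_sysinit() is normally
 * reached through the SX_SYSINIT() macro from <sys/sx.h>, which arranges
 * for a lock to be initialized at boot time.  The "example" names are
 * hypothetical.
 */
#if 0
static struct sx example_lock;
SX_SYSINIT(example_lock_init, &example_lock, "example lock");
#endif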
  206 
  207 void
  208 sx_init_flags(struct sx *sx, const char *description, int opts)
  209 {
  210         int flags;
  211 
  212         MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
  213             SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
  214         ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
  215             ("%s: sx_lock not aligned for %s: %p", __func__, description,
  216             &sx->sx_lock));
  217 
  218         flags = LO_SLEEPABLE | LO_UPGRADABLE;
  219         if (opts & SX_DUPOK)
  220                 flags |= LO_DUPOK;
  221         if (opts & SX_NOPROFILE)
  222                 flags |= LO_NOPROFILE;
  223         if (!(opts & SX_NOWITNESS))
  224                 flags |= LO_WITNESS;
  225         if (opts & SX_RECURSE)
  226                 flags |= LO_RECURSABLE;
  227         if (opts & SX_QUIET)
  228                 flags |= LO_QUIET;
  229 
  230         flags |= opts & SX_NOADAPTIVE;
  231         lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
  232         sx->sx_lock = SX_LOCK_UNLOCKED;
  233         sx->sx_recurse = 0;
  234 }
  235 
  236 void
  237 sx_destroy(struct sx *sx)
  238 {
  239 
  240         KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
  241         KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
  242         sx->sx_lock = SX_LOCK_DESTROYED;
  243         lock_destroy(&sx->lock_object);
  244 }
  245 
  246 int
  247 _sx_slock(struct sx *sx, int opts, const char *file, int line)
  248 {
  249         int error = 0;
  250 
  251         if (SCHEDULER_STOPPED())
  252                 return (0);
  253         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
  254             ("sx_slock() by idle thread %p on sx %s @ %s:%d",
  255             curthread, sx->lock_object.lo_name, file, line));
  256         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  257             ("sx_slock() of destroyed sx @ %s:%d", file, line));
  258         WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
  259         error = __sx_slock(sx, opts, file, line);
  260         if (!error) {
  261                 LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
  262                 WITNESS_LOCK(&sx->lock_object, 0, file, line);
  263                 curthread->td_locks++;
  264         }
  265 
  266         return (error);
  267 }
  268 
  269 int
  270 sx_try_slock_(struct sx *sx, const char *file, int line)
  271 {
  272         uintptr_t x;
  273 
  274         if (SCHEDULER_STOPPED())
  275                 return (1);
  276 
  277         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
  278             ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
  279             curthread, sx->lock_object.lo_name, file, line));
  280 
  281         for (;;) {
  282                 x = sx->sx_lock;
  283                 KASSERT(x != SX_LOCK_DESTROYED,
  284                     ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
  285                 if (!(x & SX_LOCK_SHARED))
  286                         break;
  287                 if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
  288                         LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
  289                         WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
  290                         curthread->td_locks++;
  291                         return (1);
  292                 }
  293         }
  294 
  295         LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
  296         return (0);
  297 }
  298 
  299 int
  300 _sx_xlock(struct sx *sx, int opts, const char *file, int line)
  301 {
  302         int error = 0;
  303 
  304         if (SCHEDULER_STOPPED())
  305                 return (0);
  306         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
  307             ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
  308             curthread, sx->lock_object.lo_name, file, line));
  309         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  310             ("sx_xlock() of destroyed sx @ %s:%d", file, line));
  311         WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
  312             line, NULL);
  313         error = __sx_xlock(sx, curthread, opts, file, line);
  314         if (!error) {
  315                 LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
  316                     file, line);
  317                 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
  318                 curthread->td_locks++;
  319         }
  320 
  321         return (error);
  322 }
  323 
  324 int
  325 sx_try_xlock_(struct sx *sx, const char *file, int line)
  326 {
  327         int rval;
  328 
  329         if (SCHEDULER_STOPPED())
  330                 return (1);
  331 
  332         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
  333             ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
  334             curthread, sx->lock_object.lo_name, file, line));
  335         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  336             ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
  337 
  338         if (sx_xlocked(sx) &&
  339             (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
  340                 sx->sx_recurse++;
  341                 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
  342                 rval = 1;
  343         } else
  344                 rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
  345                     (uintptr_t)curthread);
  346         LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
  347         if (rval) {
  348                 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
  349                     file, line);
  350                 curthread->td_locks++;
  351         }
  352 
  353         return (rval);
  354 }
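
/*
 * An illustrative sketch, not part of this file: a common use of the try
 * variants is to avoid a lock-order reversal by backing off when a second
 * lock cannot be taken immediately.  The names are hypothetical.
 */
#if 0
static int
example_lock_pair(struct sx *a, struct sx *b)
{

        sx_xlock(a);
        if (!sx_try_xlock(b)) {
                /* Would have to sleep: back out and let the caller retry. */
                sx_xunlock(a);
                return (0);
        }
        return (1);
}
#endif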
  355 
  356 void
  357 _sx_sunlock(struct sx *sx, const char *file, int line)
  358 {
  359 
  360         if (SCHEDULER_STOPPED())
  361                 return;
  362         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  363             ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
  364         _sx_assert(sx, SA_SLOCKED, file, line);
  365         WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
  366         LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
  367         __sx_sunlock(sx, file, line);
  368         LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_SUNLOCK_RELEASE, sx);
  369         curthread->td_locks--;
  370 }
  371 
  372 void
  373 _sx_xunlock(struct sx *sx, const char *file, int line)
  374 {
  375 
  376         if (SCHEDULER_STOPPED())
  377                 return;
  378         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  379             ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
  380         _sx_assert(sx, SA_XLOCKED, file, line);
  381         WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
  382         LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
  383             line);
  384         if (!sx_recursed(sx))
  385                 LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_XUNLOCK_RELEASE, sx);
  386         __sx_xunlock(sx, curthread, file, line);
  387         curthread->td_locks--;
  388 }
  389 
  390 /*
  391  * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
  392  * This will only succeed if this thread holds a single shared lock.
   393  * Return 1 if the upgrade succeeded, 0 otherwise.
  394  */
  395 int
  396 sx_try_upgrade_(struct sx *sx, const char *file, int line)
  397 {
  398         uintptr_t x;
  399         int success;
  400 
  401         if (SCHEDULER_STOPPED())
  402                 return (1);
  403 
  404         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  405             ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
  406         _sx_assert(sx, SA_SLOCKED, file, line);
  407 
  408         /*
  409          * Try to switch from one shared lock to an exclusive lock.  We need
  410          * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
  411          * we will wake up the exclusive waiters when we drop the lock.
  412          */
  413         x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
  414         success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
  415             (uintptr_t)curthread | x);
  416         LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
  417         if (success) {
  418                 WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
  419                     file, line);
  420                 LOCKSTAT_RECORD0(LS_SX_TRYUPGRADE_UPGRADE, sx);
  421         }
  422         return (success);
  423 }
  424 
  425 /*
  426  * Downgrade an unrecursed exclusive lock into a single shared lock.
  427  */
  428 void
  429 sx_downgrade_(struct sx *sx, const char *file, int line)
  430 {
  431         uintptr_t x;
  432         int wakeup_swapper;
  433 
  434         if (SCHEDULER_STOPPED())
  435                 return;
  436 
  437         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  438             ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
  439         _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
  440 #ifndef INVARIANTS
  441         if (sx_recursed(sx))
  442                 panic("downgrade of a recursed lock");
  443 #endif
  444 
  445         WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);
  446 
  447         /*
  448          * Try to switch from an exclusive lock with no shared waiters
  449          * to one sharer with no shared waiters.  If there are
  450          * exclusive waiters, we don't need to lock the sleep queue so
  451          * long as we preserve the flag.  We do one quick try and if
  452          * that fails we grab the sleepq lock to keep the flags from
  453          * changing and do it the slow way.
  454          *
  455          * We have to lock the sleep queue if there are shared waiters
  456          * so we can wake them up.
  457          */
  458         x = sx->sx_lock;
  459         if (!(x & SX_LOCK_SHARED_WAITERS) &&
  460             atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
  461             (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
  462                 LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
  463                 return;
  464         }
  465 
  466         /*
  467          * Lock the sleep queue so we can read the waiters bits
  468          * without any races and wakeup any shared waiters.
  469          */
  470         sleepq_lock(&sx->lock_object);
  471 
  472         /*
  473          * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
  474          * shared lock.  If there are any shared waiters, wake them up.
  475          */
  476         wakeup_swapper = 0;
  477         x = sx->sx_lock;
  478         atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
  479             (x & SX_LOCK_EXCLUSIVE_WAITERS));
  480         if (x & SX_LOCK_SHARED_WAITERS)
  481                 wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
  482                     0, SQ_SHARED_QUEUE);
  483         sleepq_release(&sx->lock_object);
  484 
  485         LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
  486         LOCKSTAT_RECORD0(LS_SX_DOWNGRADE_DOWNGRADE, sx);
  487 
  488         if (wakeup_swapper)
  489                 kick_proc0();
  490 }
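
/*
 * An illustrative sketch, not part of this file: the usual
 * lookup-then-modify pattern built on sx_try_upgrade() and sx_downgrade().
 * If the opportunistic upgrade fails, the lock must be dropped, re-taken
 * exclusively, and any state observed under the shared lock revalidated.
 * The names are hypothetical.
 */
#if 0
static void
example_update(struct sx *lock, int *counter)
{

        sx_slock(lock);
        if (*counter == 0) {
                if (!sx_try_upgrade(lock)) {
                        sx_sunlock(lock);
                        sx_xlock(lock);
                        /* Revalidate: another thread may have run first. */
                }
                if (*counter == 0)
                        *counter = 1;
                sx_downgrade(lock);     /* Continue under a shared lock. */
        }
        sx_sunlock(lock);
}
#endif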
  491 
  492 /*
  493  * This function represents the so-called 'hard case' for sx_xlock
  494  * operation.  All 'easy case' failures are redirected to this.  Note
  495  * that ideally this would be a static function, but it needs to be
  496  * accessible from at least sx.h.
  497  */
  498 int
  499 _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
  500     int line)
  501 {
  502         GIANT_DECLARE;
  503 #ifdef ADAPTIVE_SX
  504         volatile struct thread *owner;
  505         u_int i, spintries = 0;
  506 #endif
  507         uintptr_t x;
  508 #ifdef LOCK_PROFILING
  509         uint64_t waittime = 0;
  510         int contested = 0;
  511 #endif
  512         int error = 0;
  513 #ifdef  KDTRACE_HOOKS
  514         uint64_t spin_cnt = 0;
  515         uint64_t sleep_cnt = 0;
  516         int64_t sleep_time = 0;
  517 #endif
  518 
  519         if (SCHEDULER_STOPPED())
  520                 return (0);
  521 
  522         /* If we already hold an exclusive lock, then recurse. */
  523         if (sx_xlocked(sx)) {
  524                 KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
  525             ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
  526                     sx->lock_object.lo_name, file, line));
  527                 sx->sx_recurse++;
  528                 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
  529                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  530                         CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
  531                 return (0);
  532         }
  533 
  534         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  535                 CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
  536                     sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
  537 
  538         while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
  539 #ifdef KDTRACE_HOOKS
  540                 spin_cnt++;
  541 #endif
  542 #ifdef HWPMC_HOOKS
  543                 PMC_SOFT_CALL( , , lock, failed);
  544 #endif
  545                 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
  546                     &waittime);
  547 #ifdef ADAPTIVE_SX
  548                 /*
  549                  * If the lock is write locked and the owner is
  550                  * running on another CPU, spin until the owner stops
  551                  * running or the state of the lock changes.
  552                  */
  553                 x = sx->sx_lock;
  554                 if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
  555                         if ((x & SX_LOCK_SHARED) == 0) {
  556                                 x = SX_OWNER(x);
  557                                 owner = (struct thread *)x;
  558                                 if (TD_IS_RUNNING(owner)) {
  559                                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  560                                                 CTR3(KTR_LOCK,
  561                                             "%s: spinning on %p held by %p",
  562                                                     __func__, sx, owner);
  563                                         GIANT_SAVE();
  564                                         while (SX_OWNER(sx->sx_lock) == x &&
  565                                             TD_IS_RUNNING(owner)) {
  566                                                 cpu_spinwait();
  567 #ifdef KDTRACE_HOOKS
  568                                                 spin_cnt++;
  569 #endif
  570                                         }
  571                                         continue;
  572                                 }
  573                         } else if (SX_SHARERS(x) && spintries < asx_retries) {
  574                                 GIANT_SAVE();
  575                                 spintries++;
  576                                 for (i = 0; i < asx_loops; i++) {
  577                                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  578                                                 CTR4(KTR_LOCK,
  579                                     "%s: shared spinning on %p with %u and %u",
  580                                                     __func__, sx, spintries, i);
  581                                         x = sx->sx_lock;
  582                                         if ((x & SX_LOCK_SHARED) == 0 ||
  583                                             SX_SHARERS(x) == 0)
  584                                                 break;
  585                                         cpu_spinwait();
  586 #ifdef KDTRACE_HOOKS
  587                                         spin_cnt++;
  588 #endif
  589                                 }
  590                                 if (i != asx_loops)
  591                                         continue;
  592                         }
  593                 }
  594 #endif
  595 
  596                 sleepq_lock(&sx->lock_object);
  597                 x = sx->sx_lock;
  598 
  599                 /*
  600                  * If the lock was released while spinning on the
  601                  * sleep queue chain lock, try again.
  602                  */
  603                 if (x == SX_LOCK_UNLOCKED) {
  604                         sleepq_release(&sx->lock_object);
  605                         continue;
  606                 }
  607 
  608 #ifdef ADAPTIVE_SX
  609                 /*
  610                  * The current lock owner might have started executing
  611                  * on another CPU (or the lock could have changed
  612                  * owners) while we were waiting on the sleep queue
  613                  * chain lock.  If so, drop the sleep queue lock and try
  614                  * again.
  615                  */
  616                 if (!(x & SX_LOCK_SHARED) &&
  617                     (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
  618                         owner = (struct thread *)SX_OWNER(x);
  619                         if (TD_IS_RUNNING(owner)) {
  620                                 sleepq_release(&sx->lock_object);
  621                                 continue;
  622                         }
  623                 }
  624 #endif
  625 
  626                 /*
  627                  * If an exclusive lock was released with both shared
  628                  * and exclusive waiters and a shared waiter hasn't
  629                  * woken up and acquired the lock yet, sx_lock will be
  630                  * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
  631                  * If we see that value, try to acquire it once.  Note
  632                  * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
  633                  * as there are other exclusive waiters still.  If we
  634                  * fail, restart the loop.
  635                  */
  636                 if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
  637                         if (atomic_cmpset_acq_ptr(&sx->sx_lock,
  638                             SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
  639                             tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
  640                                 sleepq_release(&sx->lock_object);
  641                                 CTR2(KTR_LOCK, "%s: %p claimed by new writer",
  642                                     __func__, sx);
  643                                 break;
  644                         }
  645                         sleepq_release(&sx->lock_object);
  646                         continue;
  647                 }
  648 
  649                 /*
   650                  * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we
   651                  * fail, then loop back and retry.
  652                  */
  653                 if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
  654                         if (!atomic_cmpset_ptr(&sx->sx_lock, x,
  655                             x | SX_LOCK_EXCLUSIVE_WAITERS)) {
  656                                 sleepq_release(&sx->lock_object);
  657                                 continue;
  658                         }
  659                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  660                                 CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
  661                                     __func__, sx);
  662                 }
  663 
  664                 /*
  665                  * Since we have been unable to acquire the exclusive
  666                  * lock and the exclusive waiters flag is set, we have
  667                  * to sleep.
  668                  */
  669                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  670                         CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
  671                             __func__, sx);
  672 
  673 #ifdef KDTRACE_HOOKS
  674                 sleep_time -= lockstat_nsecs();
  675 #endif
  676                 GIANT_SAVE();
  677                 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
  678                     SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
  679                     SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
  680                 if (!(opts & SX_INTERRUPTIBLE))
  681                         sleepq_wait(&sx->lock_object, 0);
  682                 else
  683                         error = sleepq_wait_sig(&sx->lock_object, 0);
  684 #ifdef KDTRACE_HOOKS
  685                 sleep_time += lockstat_nsecs();
  686                 sleep_cnt++;
  687 #endif
  688                 if (error) {
  689                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  690                                 CTR2(KTR_LOCK,
  691                         "%s: interruptible sleep by %p suspended by signal",
  692                                     __func__, sx);
  693                         break;
  694                 }
  695                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  696                         CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
  697                             __func__, sx);
  698         }
  699 
  700         GIANT_RESTORE();
  701         if (!error)
  702                 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE, sx,
  703                     contested, waittime, file, line);
  704 #ifdef KDTRACE_HOOKS
  705         if (sleep_time)
  706                 LOCKSTAT_RECORD1(LS_SX_XLOCK_BLOCK, sx, sleep_time);
  707         if (spin_cnt > sleep_cnt)
  708                 LOCKSTAT_RECORD1(LS_SX_XLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
  709 #endif
  710         return (error);
  711 }
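
/*
 * An illustrative sketch, not part of this file: SX_INTERRUPTIBLE
 * acquisitions reach the hard case above through sx_xlock_sig(), which
 * allows a signal to abort the sleep; callers must check for an error.
 * The names are hypothetical.
 */
#if 0
static int
example_modify(struct sx *lock)
{
        int error;

        error = sx_xlock_sig(lock);     /* May fail with EINTR or ERESTART. */
        if (error != 0)
                return (error);
        /* ... modify the protected state ... */
        sx_xunlock(lock);
        return (0);
}
#endif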
  712 
  713 /*
  714  * This function represents the so-called 'hard case' for sx_xunlock
  715  * operation.  All 'easy case' failures are redirected to this.  Note
  716  * that ideally this would be a static function, but it needs to be
  717  * accessible from at least sx.h.
  718  */
  719 void
  720 _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
  721 {
  722         uintptr_t x;
  723         int queue, wakeup_swapper;
  724 
  725         if (SCHEDULER_STOPPED())
  726                 return;
  727 
  728         MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
  729 
  730         /* If the lock is recursed, then unrecurse one level. */
  731         if (sx_xlocked(sx) && sx_recursed(sx)) {
  732                 if ((--sx->sx_recurse) == 0)
  733                         atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
  734                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  735                         CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
  736                 return;
  737         }
  738         MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
  739             SX_LOCK_EXCLUSIVE_WAITERS));
  740         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  741                 CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);
  742 
  743         sleepq_lock(&sx->lock_object);
  744         x = SX_LOCK_UNLOCKED;
  745 
  746         /*
  747          * The wake up algorithm here is quite simple and probably not
  748          * ideal.  It gives precedence to shared waiters if they are
  749          * present.  For this condition, we have to preserve the
  750          * state of the exclusive waiters flag.
   751          * If interruptible sleeps left the shared queue empty, avoid
   752          * starvation of the threads sleeping on the exclusive queue by
   753          * giving them precedence and clearing the shared waiters bit anyway.
  754          */
  755         if ((sx->sx_lock & SX_LOCK_SHARED_WAITERS) != 0 &&
  756             sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
  757                 queue = SQ_SHARED_QUEUE;
  758                 x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
  759         } else
  760                 queue = SQ_EXCLUSIVE_QUEUE;
  761 
  762         /* Wake up all the waiters for the specific queue. */
  763         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  764                 CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
  765                     __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
  766                     "exclusive");
  767         atomic_store_rel_ptr(&sx->sx_lock, x);
  768         wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
  769             queue);
  770         sleepq_release(&sx->lock_object);
  771         if (wakeup_swapper)
  772                 kick_proc0();
  773 }
  774 
  775 /*
  776  * This function represents the so-called 'hard case' for sx_slock
  777  * operation.  All 'easy case' failures are redirected to this.  Note
  778  * that ideally this would be a static function, but it needs to be
  779  * accessible from at least sx.h.
  780  */
  781 int
  782 _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
  783 {
  784         GIANT_DECLARE;
  785 #ifdef ADAPTIVE_SX
  786         volatile struct thread *owner;
  787 #endif
  788 #ifdef LOCK_PROFILING
  789         uint64_t waittime = 0;
  790         int contested = 0;
  791 #endif
  792         uintptr_t x;
  793         int error = 0;
  794 #ifdef KDTRACE_HOOKS
  795         uint64_t spin_cnt = 0;
  796         uint64_t sleep_cnt = 0;
  797         int64_t sleep_time = 0;
  798 #endif
  799 
  800         if (SCHEDULER_STOPPED())
  801                 return (0);
  802 
  803         /*
   804          * As with rwlocks, we make no attempt to block shared locks
   805          * once there is an exclusive waiter.
  806          */
  807         for (;;) {
  808 #ifdef KDTRACE_HOOKS
  809                 spin_cnt++;
  810 #endif
  811                 x = sx->sx_lock;
  812 
  813                 /*
  814                  * If no other thread has an exclusive lock then try to bump up
  815                  * the count of sharers.  Since we have to preserve the state
  816                  * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
  817                  * shared lock loop back and retry.
  818                  */
  819                 if (x & SX_LOCK_SHARED) {
  820                         MPASS(!(x & SX_LOCK_SHARED_WAITERS));
  821                         if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
  822                             x + SX_ONE_SHARER)) {
  823                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  824                                         CTR4(KTR_LOCK,
  825                                             "%s: %p succeed %p -> %p", __func__,
  826                                             sx, (void *)x,
  827                                             (void *)(x + SX_ONE_SHARER));
  828                                 break;
  829                         }
  830                         continue;
  831                 }
  832 #ifdef HWPMC_HOOKS
  833                 PMC_SOFT_CALL( , , lock, failed);
  834 #endif
  835                 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
  836                     &waittime);
  837 
  838 #ifdef ADAPTIVE_SX
  839                 /*
  840                  * If the owner is running on another CPU, spin until
  841                  * the owner stops running or the state of the lock
  842                  * changes.
  843                  */
  844                 if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
  845                         x = SX_OWNER(x);
  846                         owner = (struct thread *)x;
  847                         if (TD_IS_RUNNING(owner)) {
  848                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  849                                         CTR3(KTR_LOCK,
  850                                             "%s: spinning on %p held by %p",
  851                                             __func__, sx, owner);
  852                                 GIANT_SAVE();
  853                                 while (SX_OWNER(sx->sx_lock) == x &&
  854                                     TD_IS_RUNNING(owner)) {
  855 #ifdef KDTRACE_HOOKS
  856                                         spin_cnt++;
  857 #endif
  858                                         cpu_spinwait();
  859                                 }
  860                                 continue;
  861                         }
  862                 }
  863 #endif
  864 
  865                 /*
  866                  * Some other thread already has an exclusive lock, so
  867                  * start the process of blocking.
  868                  */
  869                 sleepq_lock(&sx->lock_object);
  870                 x = sx->sx_lock;
  871 
  872                 /*
  873                  * The lock could have been released while we spun.
  874                  * In this case loop back and retry.
  875                  */
  876                 if (x & SX_LOCK_SHARED) {
  877                         sleepq_release(&sx->lock_object);
  878                         continue;
  879                 }
  880 
  881 #ifdef ADAPTIVE_SX
  882                 /*
   883                  * The lock owner might have started running on another
   884                  * CPU (or the lock changed owners) while we waited on
   885                  * the sleep queue chain lock.  If so, drop it and retry.
  886                  */
  887                 if (!(x & SX_LOCK_SHARED) &&
  888                     (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
  889                         owner = (struct thread *)SX_OWNER(x);
  890                         if (TD_IS_RUNNING(owner)) {
  891                                 sleepq_release(&sx->lock_object);
  892                                 continue;
  893                         }
  894                 }
  895 #endif
  896 
  897                 /*
  898                  * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
   899                  * fail to set it, drop the sleep queue lock and loop
  900                  * back.
  901                  */
  902                 if (!(x & SX_LOCK_SHARED_WAITERS)) {
  903                         if (!atomic_cmpset_ptr(&sx->sx_lock, x,
  904                             x | SX_LOCK_SHARED_WAITERS)) {
  905                                 sleepq_release(&sx->lock_object);
  906                                 continue;
  907                         }
  908                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  909                                 CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
  910                                     __func__, sx);
  911                 }
  912 
  913                 /*
  914                  * Since we have been unable to acquire the shared lock,
  915                  * we have to sleep.
  916                  */
  917                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  918                         CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
  919                             __func__, sx);
  920 
  921 #ifdef KDTRACE_HOOKS
  922                 sleep_time -= lockstat_nsecs();
  923 #endif
  924                 GIANT_SAVE();
  925                 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
  926                     SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
  927                     SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
  928                 if (!(opts & SX_INTERRUPTIBLE))
  929                         sleepq_wait(&sx->lock_object, 0);
  930                 else
  931                         error = sleepq_wait_sig(&sx->lock_object, 0);
  932 #ifdef KDTRACE_HOOKS
  933                 sleep_time += lockstat_nsecs();
  934                 sleep_cnt++;
  935 #endif
  936                 if (error) {
  937                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  938                                 CTR2(KTR_LOCK,
  939                         "%s: interruptible sleep by %p suspended by signal",
  940                                     __func__, sx);
  941                         break;
  942                 }
  943                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  944                         CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
  945                             __func__, sx);
  946         }
  947         if (error == 0)
  948                 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx,
  949                     contested, waittime, file, line);
  950 #ifdef KDTRACE_HOOKS
  951         if (sleep_time)
  952                 LOCKSTAT_RECORD1(LS_SX_XLOCK_BLOCK, sx, sleep_time);
  953         if (spin_cnt > sleep_cnt)
  954                 LOCKSTAT_RECORD1(LS_SX_XLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
  955 #endif
  956         GIANT_RESTORE();
  957         return (error);
  958 }
  959 
  960 /*
  961  * This function represents the so-called 'hard case' for sx_sunlock
  962  * operation.  All 'easy case' failures are redirected to this.  Note
  963  * that ideally this would be a static function, but it needs to be
  964  * accessible from at least sx.h.
  965  */
  966 void
  967 _sx_sunlock_hard(struct sx *sx, const char *file, int line)
  968 {
  969         uintptr_t x;
  970         int wakeup_swapper;
  971 
  972         if (SCHEDULER_STOPPED())
  973                 return;
  974 
  975         for (;;) {
  976                 x = sx->sx_lock;
  977 
  978                 /*
   979                  * We should never have shared waiters while at least one
   980                  * thread holds a shared lock.
  981                  */
  982                 KASSERT(!(x & SX_LOCK_SHARED_WAITERS),
  983                     ("%s: waiting sharers", __func__));
  984 
  985                 /*
  986                  * See if there is more than one shared lock held.  If
  987                  * so, just drop one and return.
  988                  */
  989                 if (SX_SHARERS(x) > 1) {
  990                         if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
  991                             x - SX_ONE_SHARER)) {
  992                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  993                                         CTR4(KTR_LOCK,
  994                                             "%s: %p succeeded %p -> %p",
  995                                             __func__, sx, (void *)x,
  996                                             (void *)(x - SX_ONE_SHARER));
  997                                 break;
  998                         }
  999                         continue;
 1000                 }
 1001 
 1002                 /*
 1003                  * If there aren't any waiters for an exclusive lock,
 1004                  * then try to drop it quickly.
 1005                  */
 1006                 if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
 1007                         MPASS(x == SX_SHARERS_LOCK(1));
 1008                         if (atomic_cmpset_rel_ptr(&sx->sx_lock,
 1009                             SX_SHARERS_LOCK(1), SX_LOCK_UNLOCKED)) {
 1010                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
 1011                                         CTR2(KTR_LOCK, "%s: %p last succeeded",
 1012                                             __func__, sx);
 1013                                 break;
 1014                         }
 1015                         continue;
 1016                 }
 1017 
 1018                 /*
 1019                  * At this point, there should just be one sharer with
 1020                  * exclusive waiters.
 1021                  */
 1022                 MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));
 1023 
 1024                 sleepq_lock(&sx->lock_object);
 1025 
 1026                 /*
  1027                  * The wakeup semantics here are quite simple:
 1028                  * Just wake up all the exclusive waiters.
 1029                  * Note that the state of the lock could have changed,
 1030                  * so if it fails loop back and retry.
 1031                  */
 1032                 if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
 1033                     SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
 1034                     SX_LOCK_UNLOCKED)) {
 1035                         sleepq_release(&sx->lock_object);
 1036                         continue;
 1037                 }
 1038                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  1039                         CTR2(KTR_LOCK, "%s: %p waking up all threads on "
  1040                             "exclusive queue", __func__, sx);
 1041                 wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
 1042                     0, SQ_EXCLUSIVE_QUEUE);
 1043                 sleepq_release(&sx->lock_object);
 1044                 if (wakeup_swapper)
 1045                         kick_proc0();
 1046                 break;
 1047         }
 1048 }
 1049 
 1050 #ifdef INVARIANT_SUPPORT
 1051 #ifndef INVARIANTS
 1052 #undef  _sx_assert
 1053 #endif
 1054 
 1055 /*
 1056  * In the non-WITNESS case, sx_assert() can only detect that at least
 1057  * *some* thread owns an slock, but it cannot guarantee that *this*
 1058  * thread owns an slock.
 1059  */
 1060 void
 1061 _sx_assert(const struct sx *sx, int what, const char *file, int line)
 1062 {
 1063 #ifndef WITNESS
 1064         int slocked = 0;
 1065 #endif
 1066 
 1067         if (panicstr != NULL)
 1068                 return;
 1069         switch (what) {
 1070         case SA_SLOCKED:
 1071         case SA_SLOCKED | SA_NOTRECURSED:
 1072         case SA_SLOCKED | SA_RECURSED:
 1073 #ifndef WITNESS
 1074                 slocked = 1;
 1075                 /* FALLTHROUGH */
 1076 #endif
 1077         case SA_LOCKED:
 1078         case SA_LOCKED | SA_NOTRECURSED:
 1079         case SA_LOCKED | SA_RECURSED:
 1080 #ifdef WITNESS
 1081                 witness_assert(&sx->lock_object, what, file, line);
 1082 #else
 1083                 /*
 1084                  * If some other thread has an exclusive lock or we
 1085                  * have one and are asserting a shared lock, fail.
 1086                  * Also, if no one has a lock at all, fail.
 1087                  */
 1088                 if (sx->sx_lock == SX_LOCK_UNLOCKED ||
 1089                     (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
 1090                     sx_xholder(sx) != curthread)))
 1091                         panic("Lock %s not %slocked @ %s:%d\n",
 1092                             sx->lock_object.lo_name, slocked ? "share " : "",
 1093                             file, line);
 1094 
 1095                 if (!(sx->sx_lock & SX_LOCK_SHARED)) {
 1096                         if (sx_recursed(sx)) {
 1097                                 if (what & SA_NOTRECURSED)
 1098                                         panic("Lock %s recursed @ %s:%d\n",
 1099                                             sx->lock_object.lo_name, file,
 1100                                             line);
 1101                         } else if (what & SA_RECURSED)
 1102                                 panic("Lock %s not recursed @ %s:%d\n",
 1103                                     sx->lock_object.lo_name, file, line);
 1104                 }
 1105 #endif
 1106                 break;
 1107         case SA_XLOCKED:
 1108         case SA_XLOCKED | SA_NOTRECURSED:
 1109         case SA_XLOCKED | SA_RECURSED:
 1110                 if (sx_xholder(sx) != curthread)
 1111                         panic("Lock %s not exclusively locked @ %s:%d\n",
 1112                             sx->lock_object.lo_name, file, line);
 1113                 if (sx_recursed(sx)) {
 1114                         if (what & SA_NOTRECURSED)
 1115                                 panic("Lock %s recursed @ %s:%d\n",
 1116                                     sx->lock_object.lo_name, file, line);
 1117                 } else if (what & SA_RECURSED)
 1118                         panic("Lock %s not recursed @ %s:%d\n",
 1119                             sx->lock_object.lo_name, file, line);
 1120                 break;
 1121         case SA_UNLOCKED:
 1122 #ifdef WITNESS
 1123                 witness_assert(&sx->lock_object, what, file, line);
 1124 #else
 1125                 /*
  1126                  * If we hold an exclusive lock, fail.  We can't
 1127                  * reliably check to see if we hold a shared lock or
 1128                  * not.
 1129                  */
 1130                 if (sx_xholder(sx) == curthread)
 1131                         panic("Lock %s exclusively locked @ %s:%d\n",
 1132                             sx->lock_object.lo_name, file, line);
 1133 #endif
 1134                 break;
 1135         default:
 1136                 panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
 1137                     line);
 1138         }
 1139 }
 1140 #endif  /* INVARIANT_SUPPORT */
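
/*
 * An illustrative sketch, not part of this file: sx_assert() is typically
 * used to document and enforce a function's locking contract.  The names
 * are hypothetical.
 */
#if 0
struct example_softc {
        struct sx       sc_lock;
        int             sc_dirty;
};

static void
example_modify_locked(struct example_softc *sc)
{

        sx_assert(&sc->sc_lock, SA_XLOCKED);    /* Caller holds it exclusively. */
        sc->sc_dirty = 1;
}
#endif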
 1141 
 1142 #ifdef DDB
 1143 static void
 1144 db_show_sx(const struct lock_object *lock)
 1145 {
 1146         struct thread *td;
 1147         const struct sx *sx;
 1148 
 1149         sx = (const struct sx *)lock;
 1150 
 1151         db_printf(" state: ");
 1152         if (sx->sx_lock == SX_LOCK_UNLOCKED)
 1153                 db_printf("UNLOCKED\n");
 1154         else if (sx->sx_lock == SX_LOCK_DESTROYED) {
 1155                 db_printf("DESTROYED\n");
 1156                 return;
 1157         } else if (sx->sx_lock & SX_LOCK_SHARED)
 1158                 db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
 1159         else {
 1160                 td = sx_xholder(sx);
 1161                 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
 1162                     td->td_tid, td->td_proc->p_pid, td->td_name);
 1163                 if (sx_recursed(sx))
 1164                         db_printf(" recursed: %d\n", sx->sx_recurse);
 1165         }
 1166 
 1167         db_printf(" waiters: ");
  1168         switch (sx->sx_lock &
 1169             (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
 1170         case SX_LOCK_SHARED_WAITERS:
 1171                 db_printf("shared\n");
 1172                 break;
 1173         case SX_LOCK_EXCLUSIVE_WAITERS:
 1174                 db_printf("exclusive\n");
 1175                 break;
 1176         case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
 1177                 db_printf("exclusive and shared\n");
 1178                 break;
 1179         default:
 1180                 db_printf("none\n");
 1181         }
 1182 }
 1183 
 1184 /*
 1185  * Check to see if a thread that is blocked on a sleep queue is actually
 1186  * blocked on an sx lock.  If so, output some details and return true.
 1187  * If the lock has an exclusive owner, return that in *ownerp.
 1188  */
 1189 int
 1190 sx_chain(struct thread *td, struct thread **ownerp)
 1191 {
 1192         struct sx *sx;
 1193 
 1194         /*
 1195          * Check to see if this thread is blocked on an sx lock.
 1196          * First, we check the lock class.  If that is ok, then we
 1197          * compare the lock name against the wait message.
 1198          */
 1199         sx = td->td_wchan;
 1200         if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
 1201             sx->lock_object.lo_name != td->td_wmesg)
 1202                 return (0);
 1203 
 1204         /* We think we have an sx lock, so output some details. */
 1205         db_printf("blocked on sx \"%s\" ", td->td_wmesg);
 1206         *ownerp = sx_xholder(sx);
 1207         if (sx->sx_lock & SX_LOCK_SHARED)
 1208                 db_printf("SLOCK (count %ju)\n",
 1209                     (uintmax_t)SX_SHARERS(sx->sx_lock));
 1210         else
 1211                 db_printf("XLOCK\n");
 1212         return (1);
 1213 }
 1214 #endif
