FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sx.c

    1 /*-
    2  * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
    3  * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice(s), this list of conditions and the following disclaimer as
   11  *    the first lines of this file unmodified other than the possible
   12  *    addition of one or more copyright notices.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice(s), this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
   18  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   19  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   20  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
   21  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   22  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   23  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
   24  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
   27  * DAMAGE.
   28  */
   29 
   30 /*
   31  * Shared/exclusive locks.  This implementation attempts to ensure
   32  * deterministic lock granting behavior, so that slocks and xlocks are
   33  * interleaved.
   34  *
   35  * Priority propagation will not generally raise the priority of lock holders,
   36  * so should not be relied upon in combination with sx locks.
   37  */
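
/*
 * Illustrative sketch (compiled out): typical consumer usage of the sx(9)
 * interface implemented in this file.  The lock name, the protected counter
 * and the function names are made up for the example; the calls themselves
 * (sx_init(), sx_slock(), sx_sunlock(), sx_xlock(), sx_xunlock()) come from
 * sys/sx.h.
 */
#if 0
static struct sx example_sx;
static int example_count;

static void
example_setup(void)
{
        sx_init(&example_sx, "example lock");
}

static int
example_read(void)
{
        int v;

        sx_slock(&example_sx);          /* shared: many readers at once */
        v = example_count;
        sx_sunlock(&example_sx);
        return (v);
}

static void
example_write(int v)
{
        sx_xlock(&example_sx);          /* exclusive: a single writer */
        example_count = v;
        sx_xunlock(&example_sx);
}
#endif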
   38 
   39 #include "opt_ddb.h"
   40 #include "opt_kdtrace.h"
   41 #include "opt_no_adaptive_sx.h"
   42 
   43 #include <sys/cdefs.h>
   44 __FBSDID("$FreeBSD: releng/8.3/sys/kern/kern_sx.c 230168 2012-01-15 22:02:35Z avg $");
   45 
   46 #include <sys/param.h>
   47 #include <sys/systm.h>
   48 #include <sys/ktr.h>
   49 #include <sys/lock.h>
   50 #include <sys/mutex.h>
   51 #include <sys/proc.h>
   52 #include <sys/sleepqueue.h>
   53 #include <sys/sx.h>
   54 #include <sys/sysctl.h>
   55 
   56 #if defined(SMP) && !defined(NO_ADAPTIVE_SX)
   57 #include <machine/cpu.h>
   58 #endif
   59 
   60 #ifdef DDB
   61 #include <ddb/ddb.h>
   62 #endif
   63 
   64 #if defined(SMP) && !defined(NO_ADAPTIVE_SX)
   65 #define ADAPTIVE_SX
   66 #endif
   67 
   68 CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);
   69 
   70 /* Handy macros for sleep queues. */
   71 #define SQ_EXCLUSIVE_QUEUE      0
   72 #define SQ_SHARED_QUEUE         1
   73 
   74 #ifdef ADAPTIVE_SX
   75 #define ASX_RETRIES             10
   76 #define ASX_LOOPS               10000
   77 #endif
   78 
   79 /*
   80  * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
   81  * drop Giant anytime we have to sleep or if we adaptively spin.
   82  */
   83 #define GIANT_DECLARE                                                   \
   84         int _giantcnt = 0;                                              \
   85         WITNESS_SAVE_DECL(Giant)                                        \
   86 
   87 #define GIANT_SAVE() do {                                               \
   88         if (mtx_owned(&Giant)) {                                        \
   89                 WITNESS_SAVE(&Giant.lock_object, Giant);                \
   90                 while (mtx_owned(&Giant)) {                             \
   91                         _giantcnt++;                                    \
   92                         mtx_unlock(&Giant);                             \
   93                 }                                                       \
   94         }                                                               \
   95 } while (0)
   96 
   97 #define GIANT_RESTORE() do {                                            \
   98         if (_giantcnt > 0) {                                            \
   99                 mtx_assert(&Giant, MA_NOTOWNED);                        \
  100                 while (_giantcnt--)                                     \
  101                         mtx_lock(&Giant);                               \
  102                 WITNESS_RESTORE(&Giant.lock_object, Giant);             \
  103         }                                                               \
  104 } while (0)
  105 
  106 /*
  107  * Returns true if an exclusive lock is recursed.  It assumes
  108  * curthread currently has an exclusive lock.
  109  */
  110 #define sx_recurse              lock_object.lo_data
  111 #define sx_recursed(sx)         ((sx)->sx_recurse != 0)
  112 
  113 static void     assert_sx(struct lock_object *lock, int what);
  114 #ifdef DDB
  115 static void     db_show_sx(struct lock_object *lock);
  116 #endif
  117 static void     lock_sx(struct lock_object *lock, int how);
  118 #ifdef KDTRACE_HOOKS
  119 static int      owner_sx(struct lock_object *lock, struct thread **owner);
  120 #endif
  121 static int      unlock_sx(struct lock_object *lock);
  122 
  123 struct lock_class lock_class_sx = {
  124         .lc_name = "sx",
  125         .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
  126         .lc_assert = assert_sx,
  127 #ifdef DDB
  128         .lc_ddb_show = db_show_sx,
  129 #endif
  130         .lc_lock = lock_sx,
  131         .lc_unlock = unlock_sx,
  132 #ifdef KDTRACE_HOOKS
  133         .lc_owner = owner_sx,
  134 #endif
  135 };
  136 
  137 #ifndef INVARIANTS
  138 #define _sx_assert(sx, what, file, line)
  139 #endif
  140 
  141 void
  142 assert_sx(struct lock_object *lock, int what)
  143 {
  144 
  145         sx_assert((struct sx *)lock, what);
  146 }
  147 
  148 void
  149 lock_sx(struct lock_object *lock, int how)
  150 {
  151         struct sx *sx;
  152 
  153         sx = (struct sx *)lock;
  154         if (how)
  155                 sx_xlock(sx);
  156         else
  157                 sx_slock(sx);
  158 }
  159 
  160 int
  161 unlock_sx(struct lock_object *lock)
  162 {
  163         struct sx *sx;
  164 
  165         sx = (struct sx *)lock;
  166         sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
  167         if (sx_xlocked(sx)) {
  168                 sx_xunlock(sx);
  169                 return (1);
  170         } else {
  171                 sx_sunlock(sx);
  172                 return (0);
  173         }
  174 }
  175 
  176 #ifdef KDTRACE_HOOKS
  177 int
  178 owner_sx(struct lock_object *lock, struct thread **owner)
  179 {
  180         struct sx *sx = (struct sx *)lock;
  181         uintptr_t x = sx->sx_lock;
  182 
  183         *owner = (struct thread *)SX_OWNER(x);
  184         return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
  185             (*owner != NULL));
  186 }
  187 #endif
  188 
  189 void
  190 sx_sysinit(void *arg)
  191 {
  192         struct sx_args *sargs = arg;
  193 
  194         sx_init(sargs->sa_sx, sargs->sa_desc);
  195 }
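
/*
 * Illustrative sketch (compiled out): sx_sysinit() is normally reached
 * through the SX_SYSINIT() macro from sys/sx.h, which arranges for the
 * lock to be initialized at boot.  The lock and its description here are
 * hypothetical.
 */
#if 0
static struct sx boot_lock;
SX_SYSINIT(boot_lock_init, &boot_lock, "boot lock");
#endif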
  196 
  197 void
  198 sx_init_flags(struct sx *sx, const char *description, int opts)
  199 {
  200         int flags;
  201 
  202         MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
  203             SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
  204         ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
  205             ("%s: sx_lock not aligned for %s: %p", __func__, description,
  206             &sx->sx_lock));
  207 
  208         flags = LO_SLEEPABLE | LO_UPGRADABLE;
  209         if (opts & SX_DUPOK)
  210                 flags |= LO_DUPOK;
  211         if (opts & SX_NOPROFILE)
  212                 flags |= LO_NOPROFILE;
  213         if (!(opts & SX_NOWITNESS))
  214                 flags |= LO_WITNESS;
  215         if (opts & SX_RECURSE)
  216                 flags |= LO_RECURSABLE;
  217         if (opts & SX_QUIET)
  218                 flags |= LO_QUIET;
  219 
  220         flags |= opts & SX_NOADAPTIVE;
  221         sx->sx_lock = SX_LOCK_UNLOCKED;
  222         sx->sx_recurse = 0;
  223         lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
  224 }
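
/*
 * Illustrative sketch (compiled out): initializing a lock with some of the
 * option flags handled above.  The lock and its name are hypothetical;
 * SX_RECURSE allows the owner to sx_xlock() recursively and SX_DUPOK
 * suppresses witness complaints about duplicate lock names.
 */
#if 0
static struct sx cfg_lock;

static void
cfg_lock_setup(void)
{
        sx_init_flags(&cfg_lock, "cfg lock", SX_RECURSE | SX_DUPOK);
}
#endif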
  225 
  226 void
  227 sx_destroy(struct sx *sx)
  228 {
  229 
  230         KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
  231         KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
  232         sx->sx_lock = SX_LOCK_DESTROYED;
  233         lock_destroy(&sx->lock_object);
  234 }
  235 
  236 int
  237 _sx_slock(struct sx *sx, int opts, const char *file, int line)
  238 {
  239         int error = 0;
  240 
  241         MPASS(curthread != NULL);
  242         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  243             ("sx_slock() of destroyed sx @ %s:%d", file, line));
  244         WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
  245         error = __sx_slock(sx, opts, file, line);
  246         if (!error) {
  247                 LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
  248                 WITNESS_LOCK(&sx->lock_object, 0, file, line);
  249                 curthread->td_locks++;
  250         }
  251 
  252         return (error);
  253 }
  254 
  255 int
  256 _sx_try_slock(struct sx *sx, const char *file, int line)
  257 {
  258         uintptr_t x;
  259 
  260         for (;;) {
  261                 x = sx->sx_lock;
  262                 KASSERT(x != SX_LOCK_DESTROYED,
  263                     ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
  264                 if (!(x & SX_LOCK_SHARED))
  265                         break;
  266                 if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
  267                         LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
  268                         WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
  269                         curthread->td_locks++;
  270                         return (1);
  271                 }
  272         }
  273 
  274         LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
  275         return (0);
  276 }
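
/*
 * Illustrative sketch (compiled out): non-blocking shared acquisition with
 * sx_try_slock().  The caller must be prepared to take another path when
 * the attempt fails; the names are hypothetical.
 */
#if 0
static int
peek_state(struct sx *lock, const int *statep, int *curp)
{

        if (!sx_try_slock(lock))
                return (0);             /* Lock busy; caller falls back. */
        *curp = *statep;
        sx_sunlock(lock);
        return (1);
}
#endif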
  277 
  278 int
  279 _sx_xlock(struct sx *sx, int opts, const char *file, int line)
  280 {
  281         int error = 0;
  282 
  283         MPASS(curthread != NULL);
  284         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  285             ("sx_xlock() of destroyed sx @ %s:%d", file, line));
  286         WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
  287             line, NULL);
  288         error = __sx_xlock(sx, curthread, opts, file, line);
  289         if (!error) {
  290                 LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
  291                     file, line);
  292                 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
  293                 curthread->td_locks++;
  294         }
  295 
  296         return (error);
  297 }
  298 
  299 int
  300 _sx_try_xlock(struct sx *sx, const char *file, int line)
  301 {
  302         int rval;
  303 
  304         MPASS(curthread != NULL);
  305         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  306             ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
  307 
  308         if (sx_xlocked(sx) &&
  309             (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
  310                 sx->sx_recurse++;
  311                 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
  312                 rval = 1;
  313         } else
  314                 rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
  315                     (uintptr_t)curthread);
  316         LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
  317         if (rval) {
  318                 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
  319                     file, line);
  320                 curthread->td_locks++;
  321         }
  322 
  323         return (rval);
  324 }
  325 
  326 void
  327 _sx_sunlock(struct sx *sx, const char *file, int line)
  328 {
  329 
  330         MPASS(curthread != NULL);
  331         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  332             ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
  333         _sx_assert(sx, SA_SLOCKED, file, line);
  334         curthread->td_locks--;
  335         WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
  336         LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
  337         __sx_sunlock(sx, file, line);
  338         LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_SUNLOCK_RELEASE, sx);
  339 }
  340 
  341 void
  342 _sx_xunlock(struct sx *sx, const char *file, int line)
  343 {
  344 
  345         MPASS(curthread != NULL);
  346         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  347             ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
  348         _sx_assert(sx, SA_XLOCKED, file, line);
  349         curthread->td_locks--;
  350         WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
  351         LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
  352             line);
  353         if (!sx_recursed(sx))
  354                 LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_XUNLOCK_RELEASE, sx);
  355         __sx_xunlock(sx, curthread, file, line);
  356 }
  357 
  358 /*
  359  * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
  360  * This will only succeed if this thread holds a single shared lock.
   361  * Return 1 if the upgrade succeeded, 0 otherwise.
  362  */
  363 int
  364 _sx_try_upgrade(struct sx *sx, const char *file, int line)
  365 {
  366         uintptr_t x;
  367         int success;
  368 
  369         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  370             ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
  371         _sx_assert(sx, SA_SLOCKED, file, line);
  372 
  373         /*
  374          * Try to switch from one shared lock to an exclusive lock.  We need
  375          * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
  376          * we will wake up the exclusive waiters when we drop the lock.
  377          */
  378         x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
  379         success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
  380             (uintptr_t)curthread | x);
  381         LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
  382         if (success) {
  383                 WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
  384                     file, line);
  385                 LOCKSTAT_RECORD0(LS_SX_TRYUPGRADE_UPGRADE, sx);
  386         }
  387         return (success);
  388 }
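
/*
 * Illustrative sketch (compiled out): the usual upgrade pattern.  Since
 * sx_try_upgrade() may fail, the caller either continues with the exclusive
 * lock it now holds, or drops the shared lock, reacquires exclusively and
 * revalidates whatever it examined under the shared lock.  The names are
 * hypothetical.
 */
#if 0
static void
set_state(struct sx *lock, int *statep, int newval)
{

        sx_slock(lock);
        if (*statep == newval) {
                sx_sunlock(lock);
                return;
        }
        if (!sx_try_upgrade(lock)) {
                sx_sunlock(lock);
                sx_xlock(lock);
                /* The state may have changed while the lock was dropped. */
                if (*statep == newval) {
                        sx_xunlock(lock);
                        return;
                }
        }
        *statep = newval;
        sx_xunlock(lock);
}
#endif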
  389 
  390 /*
  391  * Downgrade an unrecursed exclusive lock into a single shared lock.
  392  */
  393 void
  394 _sx_downgrade(struct sx *sx, const char *file, int line)
  395 {
  396         uintptr_t x;
  397         int wakeup_swapper;
  398 
  399         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  400             ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
  401         _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
  402 #ifndef INVARIANTS
  403         if (sx_recursed(sx))
  404                 panic("downgrade of a recursed lock");
  405 #endif
  406 
  407         WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);
  408 
  409         /*
  410          * Try to switch from an exclusive lock with no shared waiters
  411          * to one sharer with no shared waiters.  If there are
  412          * exclusive waiters, we don't need to lock the sleep queue so
  413          * long as we preserve the flag.  We do one quick try and if
  414          * that fails we grab the sleepq lock to keep the flags from
  415          * changing and do it the slow way.
  416          *
  417          * We have to lock the sleep queue if there are shared waiters
  418          * so we can wake them up.
  419          */
  420         x = sx->sx_lock;
  421         if (!(x & SX_LOCK_SHARED_WAITERS) &&
  422             atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
  423             (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
  424                 LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
  425                 return;
  426         }
  427 
  428         /*
  429          * Lock the sleep queue so we can read the waiters bits
  430          * without any races and wakeup any shared waiters.
  431          */
  432         sleepq_lock(&sx->lock_object);
  433 
  434         /*
  435          * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
  436          * shared lock.  If there are any shared waiters, wake them up.
  437          */
  438         wakeup_swapper = 0;
  439         x = sx->sx_lock;
  440         atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
  441             (x & SX_LOCK_EXCLUSIVE_WAITERS));
  442         if (x & SX_LOCK_SHARED_WAITERS)
  443                 wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
  444                     0, SQ_SHARED_QUEUE);
  445         sleepq_release(&sx->lock_object);
  446 
  447         LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
  448         LOCKSTAT_RECORD0(LS_SX_DOWNGRADE_DOWNGRADE, sx);
  449 
  450         if (wakeup_swapper)
  451                 kick_proc0();
  452 }
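
/*
 * Illustrative sketch (compiled out): downgrading after an update so that
 * readers blocked during the write can run while this thread keeps reading
 * the now-consistent state under a shared hold.  The names are hypothetical.
 */
#if 0
static void
update_then_scan(struct sx *lock, int *statep, int newval)
{

        sx_xlock(lock);
        *statep = newval;
        sx_downgrade(lock);     /* Keep a shared hold; shared waiters wake. */
        /* ... read-only work against *statep ... */
        sx_sunlock(lock);
}
#endif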
  453 
  454 /*
  455  * This function represents the so-called 'hard case' for sx_xlock
  456  * operation.  All 'easy case' failures are redirected to this.  Note
  457  * that ideally this would be a static function, but it needs to be
  458  * accessible from at least sx.h.
  459  */
  460 int
  461 _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
  462     int line)
  463 {
  464         GIANT_DECLARE;
  465 #ifdef ADAPTIVE_SX
  466         volatile struct thread *owner;
  467         u_int i, spintries = 0;
  468 #endif
  469         uintptr_t x;
  470 #ifdef LOCK_PROFILING
  471         uint64_t waittime = 0;
  472         int contested = 0;
  473 #endif
  474         int error = 0;
  475 #ifdef  KDTRACE_HOOKS
  476         uint64_t spin_cnt = 0;
  477         uint64_t sleep_cnt = 0;
  478         int64_t sleep_time = 0;
  479 #endif
  480 
  481         /* If we already hold an exclusive lock, then recurse. */
  482         if (sx_xlocked(sx)) {
  483                 KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
  484             ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
  485                     sx->lock_object.lo_name, file, line));
  486                 sx->sx_recurse++;
  487                 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
  488                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  489                         CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
  490                 return (0);
  491         }
  492 
  493         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  494                 CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
  495                     sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
  496 
  497         while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
  498 #ifdef KDTRACE_HOOKS
  499                 spin_cnt++;
  500 #endif
  501                 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
  502                     &waittime);
  503 #ifdef ADAPTIVE_SX
  504                 /*
  505                  * If the lock is write locked and the owner is
  506                  * running on another CPU, spin until the owner stops
  507                  * running or the state of the lock changes.
  508                  */
  509                 x = sx->sx_lock;
  510                 if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
  511                         if ((x & SX_LOCK_SHARED) == 0) {
  512                                 x = SX_OWNER(x);
  513                                 owner = (struct thread *)x;
  514                                 if (TD_IS_RUNNING(owner)) {
  515                                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  516                                                 CTR3(KTR_LOCK,
  517                                             "%s: spinning on %p held by %p",
  518                                                     __func__, sx, owner);
  519                                         GIANT_SAVE();
  520                                         while (SX_OWNER(sx->sx_lock) == x &&
  521                                             TD_IS_RUNNING(owner)) {
  522                                                 cpu_spinwait();
  523 #ifdef KDTRACE_HOOKS
  524                                                 spin_cnt++;
  525 #endif
  526                                         }
  527                                         continue;
  528                                 }
  529                         } else if (SX_SHARERS(x) && spintries < ASX_RETRIES) {
  530                                 GIANT_SAVE();
  531                                 spintries++;
  532                                 for (i = 0; i < ASX_LOOPS; i++) {
  533                                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  534                                                 CTR4(KTR_LOCK,
  535                                     "%s: shared spinning on %p with %u and %u",
  536                                                     __func__, sx, spintries, i);
  537                                         x = sx->sx_lock;
  538                                         if ((x & SX_LOCK_SHARED) == 0 ||
  539                                             SX_SHARERS(x) == 0)
  540                                                 break;
  541                                         cpu_spinwait();
  542 #ifdef KDTRACE_HOOKS
  543                                         spin_cnt++;
  544 #endif
  545                                 }
  546                                 if (i != ASX_LOOPS)
  547                                         continue;
  548                         }
  549                 }
  550 #endif
  551 
  552                 sleepq_lock(&sx->lock_object);
  553                 x = sx->sx_lock;
  554 
  555                 /*
  556                  * If the lock was released while spinning on the
  557                  * sleep queue chain lock, try again.
  558                  */
  559                 if (x == SX_LOCK_UNLOCKED) {
  560                         sleepq_release(&sx->lock_object);
  561                         continue;
  562                 }
  563 
  564 #ifdef ADAPTIVE_SX
  565                 /*
  566                  * The current lock owner might have started executing
  567                  * on another CPU (or the lock could have changed
  568                  * owners) while we were waiting on the sleep queue
  569                  * chain lock.  If so, drop the sleep queue lock and try
  570                  * again.
  571                  */
  572                 if (!(x & SX_LOCK_SHARED) &&
  573                     (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
  574                         owner = (struct thread *)SX_OWNER(x);
  575                         if (TD_IS_RUNNING(owner)) {
  576                                 sleepq_release(&sx->lock_object);
  577                                 continue;
  578                         }
  579                 }
  580 #endif
  581 
  582                 /*
  583                  * If an exclusive lock was released with both shared
  584                  * and exclusive waiters and a shared waiter hasn't
  585                  * woken up and acquired the lock yet, sx_lock will be
  586                  * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
  587                  * If we see that value, try to acquire it once.  Note
  588                  * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
  589                  * as there are other exclusive waiters still.  If we
  590                  * fail, restart the loop.
  591                  */
  592                 if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
  593                         if (atomic_cmpset_acq_ptr(&sx->sx_lock,
  594                             SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
  595                             tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
  596                                 sleepq_release(&sx->lock_object);
  597                                 CTR2(KTR_LOCK, "%s: %p claimed by new writer",
  598                                     __func__, sx);
  599                                 break;
  600                         }
  601                         sleepq_release(&sx->lock_object);
  602                         continue;
  603                 }
  604 
  605                 /*
   606                  * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we
   607                  * fail, then loop back and retry.
  608                  */
  609                 if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
  610                         if (!atomic_cmpset_ptr(&sx->sx_lock, x,
  611                             x | SX_LOCK_EXCLUSIVE_WAITERS)) {
  612                                 sleepq_release(&sx->lock_object);
  613                                 continue;
  614                         }
  615                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  616                                 CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
  617                                     __func__, sx);
  618                 }
  619 
  620                 /*
  621                  * Since we have been unable to acquire the exclusive
  622                  * lock and the exclusive waiters flag is set, we have
  623                  * to sleep.
  624                  */
  625                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  626                         CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
  627                             __func__, sx);
  628 
  629 #ifdef KDTRACE_HOOKS
  630                 sleep_time -= lockstat_nsecs();
  631 #endif
  632                 GIANT_SAVE();
  633                 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
  634                     SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
  635                     SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
  636                 if (!(opts & SX_INTERRUPTIBLE))
  637                         sleepq_wait(&sx->lock_object, 0);
  638                 else
  639                         error = sleepq_wait_sig(&sx->lock_object, 0);
  640 #ifdef KDTRACE_HOOKS
  641                 sleep_time += lockstat_nsecs();
  642                 sleep_cnt++;
  643 #endif
  644                 if (error) {
  645                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  646                                 CTR2(KTR_LOCK,
  647                         "%s: interruptible sleep by %p suspended by signal",
  648                                     __func__, sx);
  649                         break;
  650                 }
  651                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  652                         CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
  653                             __func__, sx);
  654         }
  655 
  656         GIANT_RESTORE();
  657         if (!error)
  658                 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE, sx,
  659                     contested, waittime, file, line);
  660 #ifdef KDTRACE_HOOKS
  661         if (sleep_time)
  662                 LOCKSTAT_RECORD1(LS_SX_XLOCK_BLOCK, sx, sleep_time);
  663         if (spin_cnt > sleep_cnt)
  664                 LOCKSTAT_RECORD1(LS_SX_XLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
  665 #endif
  666         return (error);
  667 }
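
/*
 * Illustrative sketch (compiled out): an interruptible exclusive acquisition.
 * sx_xlock_sig() passes SX_INTERRUPTIBLE down to the hard case above, so the
 * sleep can be broken by a signal and the returned error must be checked.
 * The names are hypothetical.
 */
#if 0
static int
grab_resource(struct sx *lock, int *statep)
{
        int error;

        error = sx_xlock_sig(lock);
        if (error != 0)
                return (error);         /* Interrupted by a signal. */
        *statep = 1;
        sx_xunlock(lock);
        return (0);
}
#endif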
  668 
  669 /*
  670  * This function represents the so-called 'hard case' for sx_xunlock
  671  * operation.  All 'easy case' failures are redirected to this.  Note
  672  * that ideally this would be a static function, but it needs to be
  673  * accessible from at least sx.h.
  674  */
  675 void
  676 _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
  677 {
  678         uintptr_t x;
  679         int queue, wakeup_swapper;
  680 
  681         MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
  682 
  683         /* If the lock is recursed, then unrecurse one level. */
  684         if (sx_xlocked(sx) && sx_recursed(sx)) {
  685                 if ((--sx->sx_recurse) == 0)
  686                         atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
  687                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  688                         CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
  689                 return;
  690         }
  691         MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
  692             SX_LOCK_EXCLUSIVE_WAITERS));
  693         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  694                 CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);
  695 
  696         sleepq_lock(&sx->lock_object);
  697         x = SX_LOCK_UNLOCKED;
  698 
  699         /*
  700          * The wake up algorithm here is quite simple and probably not
  701          * ideal.  It gives precedence to shared waiters if they are
  702          * present.  For this condition, we have to preserve the
  703          * state of the exclusive waiters flag.
   704          * If interruptible sleeps have left the shared queue empty, avoid
   705          * starving the threads sleeping on the exclusive queue by giving
   706          * them precedence and clearing the shared waiters bit anyway.
  707          */
  708         if ((sx->sx_lock & SX_LOCK_SHARED_WAITERS) != 0 &&
  709             sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
  710                 queue = SQ_SHARED_QUEUE;
  711                 x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
  712         } else
  713                 queue = SQ_EXCLUSIVE_QUEUE;
  714 
  715         /* Wake up all the waiters for the specific queue. */
  716         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  717                 CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
  718                     __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
  719                     "exclusive");
  720         atomic_store_rel_ptr(&sx->sx_lock, x);
  721         wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
  722             queue);
  723         sleepq_release(&sx->lock_object);
  724         if (wakeup_swapper)
  725                 kick_proc0();
  726 }
  727 
  728 /*
  729  * This function represents the so-called 'hard case' for sx_slock
  730  * operation.  All 'easy case' failures are redirected to this.  Note
  731  * that ideally this would be a static function, but it needs to be
  732  * accessible from at least sx.h.
  733  */
  734 int
  735 _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
  736 {
  737         GIANT_DECLARE;
  738 #ifdef ADAPTIVE_SX
  739         volatile struct thread *owner;
  740 #endif
  741 #ifdef LOCK_PROFILING
  742         uint64_t waittime = 0;
  743         int contested = 0;
  744 #endif
  745         uintptr_t x;
  746         int error = 0;
  747 #ifdef KDTRACE_HOOKS
  748         uint64_t spin_cnt = 0;
  749         uint64_t sleep_cnt = 0;
  750         int64_t sleep_time = 0;
  751 #endif
  752 
  753         /*
  754          * As with rwlocks, we don't make any attempt to try to block
  755          * shared locks once there is an exclusive waiter.
  756          */
  757         for (;;) {
  758 #ifdef KDTRACE_HOOKS
  759                 spin_cnt++;
  760 #endif
  761                 x = sx->sx_lock;
  762 
  763                 /*
  764                  * If no other thread has an exclusive lock then try to bump up
  765                  * the count of sharers.  Since we have to preserve the state
  766                  * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
  767                  * shared lock loop back and retry.
  768                  */
  769                 if (x & SX_LOCK_SHARED) {
  770                         MPASS(!(x & SX_LOCK_SHARED_WAITERS));
  771                         if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
  772                             x + SX_ONE_SHARER)) {
  773                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  774                                         CTR4(KTR_LOCK,
  775                                             "%s: %p succeed %p -> %p", __func__,
  776                                             sx, (void *)x,
  777                                             (void *)(x + SX_ONE_SHARER));
  778                                 break;
  779                         }
  780                         continue;
  781                 }
  782                 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
  783                     &waittime);
  784 
  785 #ifdef ADAPTIVE_SX
  786                 /*
  787                  * If the owner is running on another CPU, spin until
  788                  * the owner stops running or the state of the lock
  789                  * changes.
  790                  */
  791                 if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
  792                         x = SX_OWNER(x);
  793                         owner = (struct thread *)x;
  794                         if (TD_IS_RUNNING(owner)) {
  795                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  796                                         CTR3(KTR_LOCK,
  797                                             "%s: spinning on %p held by %p",
  798                                             __func__, sx, owner);
  799                                 GIANT_SAVE();
  800                                 while (SX_OWNER(sx->sx_lock) == x &&
  801                                     TD_IS_RUNNING(owner)) {
  802 #ifdef KDTRACE_HOOKS
  803                                         spin_cnt++;
  804 #endif
  805                                         cpu_spinwait();
  806                                 }
  807                                 continue;
  808                         }
  809                 }
  810 #endif
  811 
  812                 /*
  813                  * Some other thread already has an exclusive lock, so
  814                  * start the process of blocking.
  815                  */
  816                 sleepq_lock(&sx->lock_object);
  817                 x = sx->sx_lock;
  818 
  819                 /*
  820                  * The lock could have been released while we spun.
  821                  * In this case loop back and retry.
  822                  */
  823                 if (x & SX_LOCK_SHARED) {
  824                         sleepq_release(&sx->lock_object);
  825                         continue;
  826                 }
  827 
  828 #ifdef ADAPTIVE_SX
  829                 /*
  830                  * If the owner is running on another CPU, spin until
  831                  * the owner stops running or the state of the lock
  832                  * changes.
  833                  */
  834                 if (!(x & SX_LOCK_SHARED) &&
  835                     (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
  836                         owner = (struct thread *)SX_OWNER(x);
  837                         if (TD_IS_RUNNING(owner)) {
  838                                 sleepq_release(&sx->lock_object);
  839                                 continue;
  840                         }
  841                 }
  842 #endif
  843 
  844                 /*
  845                  * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
  846                  * fail to set it drop the sleep queue lock and loop
  847                  * back.
  848                  */
  849                 if (!(x & SX_LOCK_SHARED_WAITERS)) {
  850                         if (!atomic_cmpset_ptr(&sx->sx_lock, x,
  851                             x | SX_LOCK_SHARED_WAITERS)) {
  852                                 sleepq_release(&sx->lock_object);
  853                                 continue;
  854                         }
  855                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  856                                 CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
  857                                     __func__, sx);
  858                 }
  859 
  860                 /*
  861                  * Since we have been unable to acquire the shared lock,
  862                  * we have to sleep.
  863                  */
  864                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  865                         CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
  866                             __func__, sx);
  867 
  868 #ifdef KDTRACE_HOOKS
  869                 sleep_time -= lockstat_nsecs();
  870 #endif
  871                 GIANT_SAVE();
  872                 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
  873                     SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
  874                     SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
  875                 if (!(opts & SX_INTERRUPTIBLE))
  876                         sleepq_wait(&sx->lock_object, 0);
  877                 else
  878                         error = sleepq_wait_sig(&sx->lock_object, 0);
  879 #ifdef KDTRACE_HOOKS
  880                 sleep_time += lockstat_nsecs();
  881                 sleep_cnt++;
  882 #endif
  883                 if (error) {
  884                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  885                                 CTR2(KTR_LOCK,
  886                         "%s: interruptible sleep by %p suspended by signal",
  887                                     __func__, sx);
  888                         break;
  889                 }
  890                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  891                         CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
  892                             __func__, sx);
  893         }
  894         if (error == 0)
  895                 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx,
  896                     contested, waittime, file, line);
  897 #ifdef KDTRACE_HOOKS
  898         if (sleep_time)
  899                 LOCKSTAT_RECORD1(LS_SX_XLOCK_BLOCK, sx, sleep_time);
  900         if (spin_cnt > sleep_cnt)
  901                 LOCKSTAT_RECORD1(LS_SX_XLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
  902 #endif
  903         GIANT_RESTORE();
  904         return (error);
  905 }
  906 
  907 /*
  908  * This function represents the so-called 'hard case' for sx_sunlock
  909  * operation.  All 'easy case' failures are redirected to this.  Note
  910  * that ideally this would be a static function, but it needs to be
  911  * accessible from at least sx.h.
  912  */
  913 void
  914 _sx_sunlock_hard(struct sx *sx, const char *file, int line)
  915 {
  916         uintptr_t x;
  917         int wakeup_swapper;
  918 
  919         for (;;) {
  920                 x = sx->sx_lock;
  921 
  922                 /*
   923                  * We should never have shared waiters while at least one
   924                  * thread holds a shared lock.
  925                  */
  926                 KASSERT(!(x & SX_LOCK_SHARED_WAITERS),
  927                     ("%s: waiting sharers", __func__));
  928 
  929                 /*
  930                  * See if there is more than one shared lock held.  If
  931                  * so, just drop one and return.
  932                  */
  933                 if (SX_SHARERS(x) > 1) {
  934                         if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
  935                             x - SX_ONE_SHARER)) {
  936                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  937                                         CTR4(KTR_LOCK,
  938                                             "%s: %p succeeded %p -> %p",
  939                                             __func__, sx, (void *)x,
  940                                             (void *)(x - SX_ONE_SHARER));
  941                                 break;
  942                         }
  943                         continue;
  944                 }
  945 
  946                 /*
  947                  * If there aren't any waiters for an exclusive lock,
  948                  * then try to drop it quickly.
  949                  */
  950                 if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
  951                         MPASS(x == SX_SHARERS_LOCK(1));
  952                         if (atomic_cmpset_rel_ptr(&sx->sx_lock,
  953                             SX_SHARERS_LOCK(1), SX_LOCK_UNLOCKED)) {
  954                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  955                                         CTR2(KTR_LOCK, "%s: %p last succeeded",
  956                                             __func__, sx);
  957                                 break;
  958                         }
  959                         continue;
  960                 }
  961 
  962                 /*
  963                  * At this point, there should just be one sharer with
  964                  * exclusive waiters.
  965                  */
  966                 MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));
  967 
  968                 sleepq_lock(&sx->lock_object);
  969 
  970                 /*
  971                  * Wake up semantic here is quite simple:
  972                  * Just wake up all the exclusive waiters.
  973                  * Note that the state of the lock could have changed,
  974                  * so if it fails loop back and retry.
  975                  */
  976                 if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
  977                     SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
  978                     SX_LOCK_UNLOCKED)) {
  979                         sleepq_release(&sx->lock_object);
  980                         continue;
  981                 }
  982                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
   983                         CTR2(KTR_LOCK, "%s: %p waking up all threads on"
   984                             " exclusive queue", __func__, sx);
  985                 wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
  986                     0, SQ_EXCLUSIVE_QUEUE);
  987                 sleepq_release(&sx->lock_object);
  988                 if (wakeup_swapper)
  989                         kick_proc0();
  990                 break;
  991         }
  992 }
  993 
  994 #ifdef INVARIANT_SUPPORT
  995 #ifndef INVARIANTS
  996 #undef  _sx_assert
  997 #endif
  998 
  999 /*
 1000  * In the non-WITNESS case, sx_assert() can only detect that at least
 1001  * *some* thread owns an slock, but it cannot guarantee that *this*
 1002  * thread owns an slock.
 1003  */
 1004 void
 1005 _sx_assert(struct sx *sx, int what, const char *file, int line)
 1006 {
 1007 #ifndef WITNESS
 1008         int slocked = 0;
 1009 #endif
 1010 
 1011         if (panicstr != NULL)
 1012                 return;
 1013         switch (what) {
 1014         case SA_SLOCKED:
 1015         case SA_SLOCKED | SA_NOTRECURSED:
 1016         case SA_SLOCKED | SA_RECURSED:
 1017 #ifndef WITNESS
 1018                 slocked = 1;
 1019                 /* FALLTHROUGH */
 1020 #endif
 1021         case SA_LOCKED:
 1022         case SA_LOCKED | SA_NOTRECURSED:
 1023         case SA_LOCKED | SA_RECURSED:
 1024 #ifdef WITNESS
 1025                 witness_assert(&sx->lock_object, what, file, line);
 1026 #else
 1027                 /*
 1028                  * If some other thread has an exclusive lock or we
 1029                  * have one and are asserting a shared lock, fail.
 1030                  * Also, if no one has a lock at all, fail.
 1031                  */
 1032                 if (sx->sx_lock == SX_LOCK_UNLOCKED ||
 1033                     (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
 1034                     sx_xholder(sx) != curthread)))
 1035                         panic("Lock %s not %slocked @ %s:%d\n",
 1036                             sx->lock_object.lo_name, slocked ? "share " : "",
 1037                             file, line);
 1038 
 1039                 if (!(sx->sx_lock & SX_LOCK_SHARED)) {
 1040                         if (sx_recursed(sx)) {
 1041                                 if (what & SA_NOTRECURSED)
 1042                                         panic("Lock %s recursed @ %s:%d\n",
 1043                                             sx->lock_object.lo_name, file,
 1044                                             line);
 1045                         } else if (what & SA_RECURSED)
 1046                                 panic("Lock %s not recursed @ %s:%d\n",
 1047                                     sx->lock_object.lo_name, file, line);
 1048                 }
 1049 #endif
 1050                 break;
 1051         case SA_XLOCKED:
 1052         case SA_XLOCKED | SA_NOTRECURSED:
 1053         case SA_XLOCKED | SA_RECURSED:
 1054                 if (sx_xholder(sx) != curthread)
 1055                         panic("Lock %s not exclusively locked @ %s:%d\n",
 1056                             sx->lock_object.lo_name, file, line);
 1057                 if (sx_recursed(sx)) {
 1058                         if (what & SA_NOTRECURSED)
 1059                                 panic("Lock %s recursed @ %s:%d\n",
 1060                                     sx->lock_object.lo_name, file, line);
 1061                 } else if (what & SA_RECURSED)
 1062                         panic("Lock %s not recursed @ %s:%d\n",
 1063                             sx->lock_object.lo_name, file, line);
 1064                 break;
 1065         case SA_UNLOCKED:
 1066 #ifdef WITNESS
 1067                 witness_assert(&sx->lock_object, what, file, line);
 1068 #else
 1069                 /*
  1070                  * If we hold an exclusive lock, fail.  We can't
 1071                  * reliably check to see if we hold a shared lock or
 1072                  * not.
 1073                  */
 1074                 if (sx_xholder(sx) == curthread)
 1075                         panic("Lock %s exclusively locked @ %s:%d\n",
 1076                             sx->lock_object.lo_name, file, line);
 1077 #endif
 1078                 break;
 1079         default:
 1080                 panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
 1081                     line);
 1082         }
 1083 }
 1084 #endif  /* INVARIANT_SUPPORT */
 1085 
 1086 #ifdef DDB
 1087 static void
 1088 db_show_sx(struct lock_object *lock)
 1089 {
 1090         struct thread *td;
 1091         struct sx *sx;
 1092 
 1093         sx = (struct sx *)lock;
 1094 
 1095         db_printf(" state: ");
 1096         if (sx->sx_lock == SX_LOCK_UNLOCKED)
 1097                 db_printf("UNLOCKED\n");
 1098         else if (sx->sx_lock == SX_LOCK_DESTROYED) {
 1099                 db_printf("DESTROYED\n");
 1100                 return;
 1101         } else if (sx->sx_lock & SX_LOCK_SHARED)
 1102                 db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
 1103         else {
 1104                 td = sx_xholder(sx);
 1105                 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
 1106                     td->td_tid, td->td_proc->p_pid, td->td_name);
 1107                 if (sx_recursed(sx))
 1108                         db_printf(" recursed: %d\n", sx->sx_recurse);
 1109         }
 1110 
 1111         db_printf(" waiters: ");
 1112         switch(sx->sx_lock &
 1113             (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
 1114         case SX_LOCK_SHARED_WAITERS:
 1115                 db_printf("shared\n");
 1116                 break;
 1117         case SX_LOCK_EXCLUSIVE_WAITERS:
 1118                 db_printf("exclusive\n");
 1119                 break;
 1120         case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
 1121                 db_printf("exclusive and shared\n");
 1122                 break;
 1123         default:
 1124                 db_printf("none\n");
 1125         }
 1126 }
 1127 
 1128 /*
 1129  * Check to see if a thread that is blocked on a sleep queue is actually
 1130  * blocked on an sx lock.  If so, output some details and return true.
 1131  * If the lock has an exclusive owner, return that in *ownerp.
 1132  */
 1133 int
 1134 sx_chain(struct thread *td, struct thread **ownerp)
 1135 {
 1136         struct sx *sx;
 1137 
 1138         /*
 1139          * Check to see if this thread is blocked on an sx lock.
 1140          * First, we check the lock class.  If that is ok, then we
 1141          * compare the lock name against the wait message.
 1142          */
 1143         sx = td->td_wchan;
 1144         if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
 1145             sx->lock_object.lo_name != td->td_wmesg)
 1146                 return (0);
 1147 
 1148         /* We think we have an sx lock, so output some details. */
 1149         db_printf("blocked on sx \"%s\" ", td->td_wmesg);
 1150         *ownerp = sx_xholder(sx);
 1151         if (sx->sx_lock & SX_LOCK_SHARED)
 1152                 db_printf("SLOCK (count %ju)\n",
 1153                     (uintmax_t)SX_SHARERS(sx->sx_lock));
 1154         else
 1155                 db_printf("XLOCK\n");
 1156         return (1);
 1157 }
 1158 #endif
