FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sx.c


    1 /*-
    2  * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
    3  * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice(s), this list of conditions and the following disclaimer as
   11  *    the first lines of this file unmodified other than the possible
   12  *    addition of one or more copyright notices.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice(s), this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
   18  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   19  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   20  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
   21  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   22  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   23  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
   24  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
   27  * DAMAGE.
   28  */
   29 
   30 /*
   31  * Shared/exclusive locks.  This implementation attempts to ensure
   32  * deterministic lock granting behavior, so that slocks and xlocks are
   33  * interleaved.
   34  *
   35  * Priority propagation will not generally raise the priority of lock holders,
   36  * so should not be relied upon in combination with sx locks.
   37  */
   38 
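For orientation, the sketch below shows how a typical consumer uses the sx(9)
interface implemented in this file.  It is illustrative only; the "foo" lock,
data, and function names are hypothetical and do not appear in the kernel.

        #include <sys/param.h>
        #include <sys/lock.h>
        #include <sys/sx.h>

        static struct sx foo_lock;
        static int foo_count;

        static void
        foo_setup(void)
        {

                sx_init(&foo_lock, "foo count lock");
        }

        static int
        foo_read(void)
        {
                int v;

                sx_slock(&foo_lock);            /* many readers may hold this */
                v = foo_count;
                sx_sunlock(&foo_lock);
                return (v);
        }

        static void
        foo_write(int v)
        {

                sx_xlock(&foo_lock);            /* writers get exclusive access */
                foo_count = v;
                sx_xunlock(&foo_lock);
        }
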
   39 #include "opt_adaptive_sx.h"
   40 #include "opt_ddb.h"
   41 
   42 #include <sys/cdefs.h>
   43 __FBSDID("$FreeBSD$");
   44 
   45 #include <sys/param.h>
   46 #include <sys/ktr.h>
   47 #include <sys/lock.h>
   48 #include <sys/mutex.h>
   49 #include <sys/proc.h>
   50 #include <sys/sleepqueue.h>
   51 #include <sys/sx.h>
   52 #include <sys/systm.h>
   53 
   54 #ifdef ADAPTIVE_SX
   55 #include <machine/cpu.h>
   56 #endif
   57 
   58 #ifdef DDB
   59 #include <ddb/ddb.h>
   60 #endif
   61 
   62 #if !defined(SMP) && defined(ADAPTIVE_SX)
   63 #error "You must have SMP to enable the ADAPTIVE_SX option"
   64 #endif
   65 
   66 CTASSERT(((SX_ADAPTIVESPIN | SX_RECURSE) & LO_CLASSFLAGS) ==
   67     (SX_ADAPTIVESPIN | SX_RECURSE));
   68 
   69 /* Handy macros for sleep queues. */
   70 #define SQ_EXCLUSIVE_QUEUE      0
   71 #define SQ_SHARED_QUEUE         1
   72 
   73 /*
   74  * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
   75  * drop Giant anytime we have to sleep or if we adaptively spin.
   76  */
   77 #define GIANT_DECLARE                                                   \
   78         int _giantcnt = 0;                                              \
   79         WITNESS_SAVE_DECL(Giant)                                        \
   80 
   81 #define GIANT_SAVE() do {                                               \
   82         if (mtx_owned(&Giant)) {                                        \
   83                 WITNESS_SAVE(&Giant.lock_object, Giant);                \
   84                 while (mtx_owned(&Giant)) {                             \
   85                         _giantcnt++;                                    \
   86                         mtx_unlock(&Giant);                             \
   87                 }                                                       \
   88         }                                                               \
   89 } while (0)
   90 
   91 #define GIANT_RESTORE() do {                                            \
   92         if (_giantcnt > 0) {                                            \
   93                 mtx_assert(&Giant, MA_NOTOWNED);                        \
   94                 while (_giantcnt--)                                     \
   95                         mtx_lock(&Giant);                               \
   96                 WITNESS_RESTORE(&Giant.lock_object, Giant);             \
   97         }                                                               \
   98 } while (0)
   99 
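These GIANT_* macros are used together in the contested-path ("hard case")
functions later in this file.  The pattern, condensed from those functions, is:

        GIANT_DECLARE;                          /* declares _giantcnt and a WITNESS save slot */
        ...
        GIANT_SAVE();                           /* fully release Giant before waiting */
        sleepq_wait(&sx->lock_object);          /* or a cpu_spinwait() loop */
        ...
        GIANT_RESTORE();                        /* reacquire Giant the saved number of times */
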
  100 /*
  101  * Returns true if an exclusive lock is recursed.  It assumes
  102  * curthread currently has an exclusive lock.
  103  */
  104 #define sx_recursed(sx)         ((sx)->sx_recurse != 0)
  105 
  106 #ifdef DDB
  107 static void     db_show_sx(struct lock_object *lock);
  108 #endif
  109 static void     lock_sx(struct lock_object *lock, int how);
  110 static int      unlock_sx(struct lock_object *lock);
  111 
  112 struct lock_class lock_class_sx = {
  113         .lc_name = "sx",
  114         .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
  115 #ifdef DDB
  116         .lc_ddb_show = db_show_sx,
  117 #endif
  118         .lc_lock = lock_sx,
  119         .lc_unlock = unlock_sx,
  120 };
  121 
  122 #ifndef INVARIANTS
  123 #define _sx_assert(sx, what, file, line)
  124 #endif
  125 
  126 void
  127 lock_sx(struct lock_object *lock, int how)
  128 {
  129         struct sx *sx;
  130 
  131         sx = (struct sx *)lock;
  132         if (how)
  133                 sx_xlock(sx);
  134         else
  135                 sx_slock(sx);
  136 }
  137 
  138 int
  139 unlock_sx(struct lock_object *lock)
  140 {
  141         struct sx *sx;
  142 
  143         sx = (struct sx *)lock;
  144         sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
  145         if (sx_xlocked(sx)) {
  146                 sx_xunlock(sx);
  147                 return (1);
  148         } else {
  149                 sx_sunlock(sx);
  150                 return (0);
  151         }
  152 }
  153 
  154 void
  155 sx_sysinit(void *arg)
  156 {
  157         struct sx_args *sargs = arg;
  158 
  159         sx_init(sargs->sa_sx, sargs->sa_desc);
  160 }
  161 
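sx_sysinit() is the handler used when a lock is initialized automatically at
boot via SYSINIT(9).  Assuming the SX_SYSINIT() convenience macro from
sys/sx.h (which packages a struct sx_args and registers this handler), a
hypothetical static lock would be set up as:

        static struct sx foo_lock;
        SX_SYSINIT(foo_lock_init, &foo_lock, "foo lock");
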
  162 void
  163 sx_init_flags(struct sx *sx, const char *description, int opts)
  164 {
  165         int flags;
  166 
  167         MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
  168             SX_NOPROFILE | SX_ADAPTIVESPIN)) == 0);
  169 
  170         flags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
  171         if (opts & SX_DUPOK)
  172                 flags |= LO_DUPOK;
  173         if (opts & SX_NOPROFILE)
  174                 flags |= LO_NOPROFILE;
  175         if (!(opts & SX_NOWITNESS))
  176                 flags |= LO_WITNESS;
  177         if (opts & SX_QUIET)
  178                 flags |= LO_QUIET;
  179 
  180         flags |= opts & (SX_ADAPTIVESPIN | SX_RECURSE);
  181         sx->sx_lock = SX_LOCK_UNLOCKED;
  182         sx->sx_recurse = 0;
  183         lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
  184 }
  185 
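Most code uses the plain sx_init() wrapper, which passes no option flags.  A
lock that must tolerate recursive exclusive acquisition by the same thread
would instead be initialized with SX_RECURSE; for example (the softc and
field names are hypothetical):

        sx_init_flags(&sc->sc_conf_lock, "mydrv config", SX_RECURSE);
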
  186 void
  187 sx_destroy(struct sx *sx)
  188 {
  189 
  190         KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
  191         KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
  192         sx->sx_lock = SX_LOCK_DESTROYED;
  193         lock_destroy(&sx->lock_object);
  194 }
  195 
  196 int
  197 _sx_slock(struct sx *sx, int opts, const char *file, int line)
  198 {
  199         int error = 0;
  200 
  201         MPASS(curthread != NULL);
  202         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  203             ("sx_slock() of destroyed sx @ %s:%d", file, line));
  204         WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line);
  205         error = __sx_slock(sx, opts, file, line);
  206         if (!error) {
  207                 LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
  208                 WITNESS_LOCK(&sx->lock_object, 0, file, line);
  209                 curthread->td_locks++;
  210         }
  211 
  212         return (error);
  213 }
  214 
  215 int
  216 _sx_try_slock(struct sx *sx, const char *file, int line)
  217 {
  218         uintptr_t x;
  219 
  220         for (;;) {
  221                 x = sx->sx_lock;
  222                 KASSERT(x != SX_LOCK_DESTROYED,
  223                     ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
  224                 if (!(x & SX_LOCK_SHARED))
  225                         break;
  226                 if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
  227                         LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
  228                         WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
  229                         curthread->td_locks++;
  230                         return (1);
  231                 }
  232         }
  233 
  234         LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
  235         return (0);
  236 }
  237 
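Because sx_try_slock() never sleeps, it is useful when a caller cannot afford
to block waiting for the lock.  A hypothetical caller:

        if (sx_try_slock(&foo_lock)) {
                /* Read the shared state. */
                sx_sunlock(&foo_lock);
        } else {
                /* The lock is write-locked; defer the work or fall back. */
        }
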
  238 int
  239 _sx_xlock(struct sx *sx, int opts, const char *file, int line)
  240 {
  241         int error = 0;
  242 
  243         MPASS(curthread != NULL);
  244         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  245             ("sx_xlock() of destroyed sx @ %s:%d", file, line));
  246         WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
  247             line);
  248         error = __sx_xlock(sx, curthread, opts, file, line);
  249         if (!error) {
  250                 LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
  251                     file, line);
  252                 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
  253                 curthread->td_locks++;
  254         }
  255 
  256         return (error);
  257 }
  258 
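The opts argument accepts SX_INTERRUPTIBLE, in which case the sleep may be
aborted by a signal and a non-zero error is returned.  Callers normally reach
this through a wrapper such as sx_xlock_sig() (assumed here to be provided by
sys/sx.h):

        error = sx_xlock_sig(&foo_lock);
        if (error != 0)
                return (error);         /* interrupted; the lock is not held */
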
  259 int
  260 _sx_try_xlock(struct sx *sx, const char *file, int line)
  261 {
  262         int rval;
  263 
  264         MPASS(curthread != NULL);
  265         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  266             ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
  267 
  268         if (sx_xlocked(sx) && (sx->lock_object.lo_flags & SX_RECURSE) != 0) {
  269                 sx->sx_recurse++;
  270                 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
  271                 rval = 1;
  272         } else
  273                 rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
  274                     (uintptr_t)curthread);
  275         LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
  276         if (rval) {
  277                 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
  278                     file, line);
  279                 curthread->td_locks++;
  280         }
  281 
  282         return (rval);
  283 }
  284 
  285 void
  286 _sx_sunlock(struct sx *sx, const char *file, int line)
  287 {
  288 
  289         MPASS(curthread != NULL);
  290         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  291             ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
  292         _sx_assert(sx, SA_SLOCKED, file, line);
  293         curthread->td_locks--;
  294         WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
  295         LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
  296 #ifdef LOCK_PROFILING_SHARED
  297         if (SX_SHARERS(sx->sx_lock) == 1)
  298                 lock_profile_release_lock(&sx->lock_object);
  299 #endif
  300         __sx_sunlock(sx, file, line);
  301 }
  302 
  303 void
  304 _sx_xunlock(struct sx *sx, const char *file, int line)
  305 {
  306 
  307         MPASS(curthread != NULL);
  308         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  309             ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
  310         _sx_assert(sx, SA_XLOCKED, file, line);
  311         curthread->td_locks--;
  312         WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
  313         LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
  314             line);
  315         if (!sx_recursed(sx))
  316                 lock_profile_release_lock(&sx->lock_object);
  317         __sx_xunlock(sx, curthread, file, line);
  318 }
  319 
  320 /*
  321  * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
  322  * This will only succeed if this thread holds a single shared lock.
   323  * Return 1 if the upgrade succeeds, 0 otherwise.
  324  */
  325 int
  326 _sx_try_upgrade(struct sx *sx, const char *file, int line)
  327 {
  328         uintptr_t x;
  329         int success;
  330 
  331         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  332             ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
  333         _sx_assert(sx, SA_SLOCKED, file, line);
  334 
  335         /*
  336          * Try to switch from one shared lock to an exclusive lock.  We need
  337          * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
  338          * we will wake up the exclusive waiters when we drop the lock.
  339          */
  340         x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
  341         success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
  342             (uintptr_t)curthread | x);
  343         LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
  344         if (success)
  345                 WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
  346                     file, line);
  347         return (success);
  348 }
  349 
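Since the upgrade can fail, callers typically fall back to dropping the shared
lock, taking the exclusive lock outright, and revalidating whatever they
learned while only the shared lock was held.  A hypothetical pattern:

        sx_slock(&foo_lock);
        /* ... decide that an update is needed ... */
        if (!sx_try_upgrade(&foo_lock)) {
                sx_sunlock(&foo_lock);
                sx_xlock(&foo_lock);
                /* The state may have changed; re-check before modifying. */
        }
        /* ... perform the update ... */
        sx_xunlock(&foo_lock);
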
  350 /*
  351  * Downgrade an unrecursed exclusive lock into a single shared lock.
  352  */
  353 void
  354 _sx_downgrade(struct sx *sx, const char *file, int line)
  355 {
  356         uintptr_t x;
  357 
  358         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
  359             ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
  360         _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
  361 #ifndef INVARIANTS
  362         if (sx_recursed(sx))
  363                 panic("downgrade of a recursed lock");
  364 #endif
  365 
  366         WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);
  367 
  368         /*
  369          * Try to switch from an exclusive lock with no shared waiters
  370          * to one sharer with no shared waiters.  If there are
  371          * exclusive waiters, we don't need to lock the sleep queue so
  372          * long as we preserve the flag.  We do one quick try and if
  373          * that fails we grab the sleepq lock to keep the flags from
  374          * changing and do it the slow way.
  375          *
  376          * We have to lock the sleep queue if there are shared waiters
  377          * so we can wake them up.
  378          */
  379         x = sx->sx_lock;
  380         if (!(x & SX_LOCK_SHARED_WAITERS) &&
  381             atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
  382             (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
  383                 LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
  384                 return;
  385         }
  386 
  387         /*
  388          * Lock the sleep queue so we can read the waiters bits
  389          * without any races and wakeup any shared waiters.
  390          */
  391         sleepq_lock(&sx->lock_object);
  392 
  393         /*
  394          * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
  395          * shared lock.  If there are any shared waiters, wake them up.
  396          */
  397         x = sx->sx_lock;
  398         atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
  399             (x & SX_LOCK_EXCLUSIVE_WAITERS));
  400         if (x & SX_LOCK_SHARED_WAITERS)
  401                 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1,
  402                     SQ_SHARED_QUEUE);
  403         else
  404                 sleepq_release(&sx->lock_object);
  405 
  406         LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
  407 }
  408 
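Downgrading lets a thread publish an update under the exclusive lock and then
keep reading without allowing a writer in between.  A hypothetical caller:

        sx_xlock(&foo_lock);
        foo_count++;                    /* exclusive update */
        sx_downgrade(&foo_lock);        /* now exactly one sharer */
        /* Keep reading foo_count; other readers may now join. */
        sx_sunlock(&foo_lock);
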
  409 /*
  410  * This function represents the so-called 'hard case' for sx_xlock
  411  * operation.  All 'easy case' failures are redirected to this.  Note
  412  * that ideally this would be a static function, but it needs to be
  413  * accessible from at least sx.h.
  414  */
  415 int
  416 _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
  417     int line)
  418 {
  419         GIANT_DECLARE;
  420 #ifdef ADAPTIVE_SX
  421         volatile struct thread *owner;
  422 #endif
  423         uint64_t waittime = 0;
  424         uintptr_t x;
  425         int contested = 0, error = 0;
  426 
  427         /* If we already hold an exclusive lock, then recurse. */
  428         if (sx_xlocked(sx)) {
  429                 KASSERT((sx->lock_object.lo_flags & SX_RECURSE) != 0,
  430             ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
  431                     sx->lock_object.lo_name, file, line));
  432                 sx->sx_recurse++;
  433                 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
  434                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  435                         CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
  436                 return (0);
  437         }
  438 
  439         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  440                 CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
  441                     sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
  442 
  443         while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
  444 #ifdef ADAPTIVE_SX
  445                 /*
  446                  * If the lock is write locked and the owner is
  447                  * running on another CPU, spin until the owner stops
  448                  * running or the state of the lock changes.
  449                  */
  450                 x = sx->sx_lock;
  451                 if (!(x & SX_LOCK_SHARED) &&
  452                     (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
  453                         x = SX_OWNER(x);
  454                         owner = (struct thread *)x;
  455                         if (TD_IS_RUNNING(owner)) {
  456                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  457                                         CTR3(KTR_LOCK,
  458                                             "%s: spinning on %p held by %p",
  459                                             __func__, sx, owner);
  460                                 GIANT_SAVE();
  461                                 lock_profile_obtain_lock_failed(
  462                                     &sx->lock_object, &contested, &waittime);
  463                                 while (SX_OWNER(sx->sx_lock) == x &&
  464                                     TD_IS_RUNNING(owner))
  465                                         cpu_spinwait();
  466                                 continue;
  467                         }
  468                 }
  469 #endif
  470 
  471                 sleepq_lock(&sx->lock_object);
  472                 x = sx->sx_lock;
  473 
  474                 /*
  475                  * If the lock was released while spinning on the
  476                  * sleep queue chain lock, try again.
  477                  */
  478                 if (x == SX_LOCK_UNLOCKED) {
  479                         sleepq_release(&sx->lock_object);
  480                         continue;
  481                 }
  482 
  483 #ifdef ADAPTIVE_SX
  484                 /*
  485                  * The current lock owner might have started executing
  486                  * on another CPU (or the lock could have changed
  487                  * owners) while we were waiting on the sleep queue
  488                  * chain lock.  If so, drop the sleep queue lock and try
  489                  * again.
  490                  */
  491                 if (!(x & SX_LOCK_SHARED) &&
  492                     (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
  493                         owner = (struct thread *)SX_OWNER(x);
  494                         if (TD_IS_RUNNING(owner)) {
  495                                 sleepq_release(&sx->lock_object);
  496                                 continue;
  497                         }
  498                 }
  499 #endif
  500 
  501                 /*
  502                  * If an exclusive lock was released with both shared
  503                  * and exclusive waiters and a shared waiter hasn't
  504                  * woken up and acquired the lock yet, sx_lock will be
  505                  * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
  506                  * If we see that value, try to acquire it once.  Note
  507                  * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
  508                  * as there are other exclusive waiters still.  If we
  509                  * fail, restart the loop.
  510                  */
  511                 if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
  512                         if (atomic_cmpset_acq_ptr(&sx->sx_lock,
  513                             SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
  514                             tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
  515                                 sleepq_release(&sx->lock_object);
  516                                 CTR2(KTR_LOCK, "%s: %p claimed by new writer",
  517                                     __func__, sx);
  518                                 break;
  519                         }
  520                         sleepq_release(&sx->lock_object);
  521                         continue;
  522                 }
  523 
  524                 /*
   525                  * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If
   526                  * we fail, then loop back and retry.
   527                  */
  528                 if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
  529                         if (!atomic_cmpset_ptr(&sx->sx_lock, x,
  530                             x | SX_LOCK_EXCLUSIVE_WAITERS)) {
  531                                 sleepq_release(&sx->lock_object);
  532                                 continue;
  533                         }
  534                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  535                                 CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
  536                                     __func__, sx);
  537                 }
  538 
  539                 /*
  540                  * Since we have been unable to acquire the exclusive
  541                  * lock and the exclusive waiters flag is set, we have
  542                  * to sleep.
  543                  */
  544                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  545                         CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
  546                             __func__, sx);
  547 
  548                 GIANT_SAVE();
  549                 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
  550                     &waittime);
  551                 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
  552                     SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
  553                     SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
  554                 if (!(opts & SX_INTERRUPTIBLE))
  555                         sleepq_wait(&sx->lock_object);
  556                 else
  557                         error = sleepq_wait_sig(&sx->lock_object);
  558 
  559                 if (error) {
  560                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  561                                 CTR2(KTR_LOCK,
  562                         "%s: interruptible sleep by %p suspended by signal",
  563                                     __func__, sx);
  564                         break;
  565                 }
  566                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  567                         CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
  568                             __func__, sx);
  569         }
  570 
  571         GIANT_RESTORE();
  572         if (!error)
  573                 lock_profile_obtain_lock_success(&sx->lock_object, contested,
  574                     waittime, file, line);
  575         return (error);
  576 }
  577 
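The 'easy case' referred to above lives in sys/sx.h: an inline attempts a
single uncontested compare-and-set and only calls into _sx_xlock_hard() when
that fails.  A simplified sketch of that dispatch (not the exact header code):

        if (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
            (uintptr_t)curthread))
                error = _sx_xlock_hard(sx, (uintptr_t)curthread, opts,
                    file, line);
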
  578 /*
  579  * This function represents the so-called 'hard case' for sx_xunlock
  580  * operation.  All 'easy case' failures are redirected to this.  Note
  581  * that ideally this would be a static function, but it needs to be
  582  * accessible from at least sx.h.
  583  */
  584 void
  585 _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
  586 {
  587         uintptr_t x;
  588         int queue;
  589 
  590         MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
  591 
  592         /* If the lock is recursed, then unrecurse one level. */
  593         if (sx_xlocked(sx) && sx_recursed(sx)) {
  594                 if ((--sx->sx_recurse) == 0)
  595                         atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
  596                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  597                         CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
  598                 return;
  599         }
  600         MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
  601             SX_LOCK_EXCLUSIVE_WAITERS));
  602         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  603                 CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);
  604 
  605         sleepq_lock(&sx->lock_object);
  606         x = SX_LOCK_UNLOCKED;
  607 
  608         /*
  609          * The wake up algorithm here is quite simple and probably not
  610          * ideal.  It gives precedence to shared waiters if they are
   611          * present.  In that case, we also have to preserve the
   612          * state of the exclusive waiters flag.
  613          */
  614         if (sx->sx_lock & SX_LOCK_SHARED_WAITERS) {
  615                 queue = SQ_SHARED_QUEUE;
  616                 x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
  617         } else
  618                 queue = SQ_EXCLUSIVE_QUEUE;
  619 
  620         /* Wake up all the waiters for the specific queue. */
  621         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  622                 CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
  623                     __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
  624                     "exclusive");
  625         atomic_store_rel_ptr(&sx->sx_lock, x);
  626         sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1, queue);
  627 }
  628 
  629 /*
  630  * This function represents the so-called 'hard case' for sx_slock
  631  * operation.  All 'easy case' failures are redirected to this.  Note
  632  * that ideally this would be a static function, but it needs to be
  633  * accessible from at least sx.h.
  634  */
  635 int
  636 _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
  637 {
  638         GIANT_DECLARE;
  639 #ifdef ADAPTIVE_SX
  640         volatile struct thread *owner;
  641 #endif
  642 #ifdef LOCK_PROFILING_SHARED
  643         uint64_t waittime = 0;
  644         int contested = 0;
  645 #endif
  646         uintptr_t x;
  647         int error = 0;
  648 
  649         /*
   650          * As with rwlocks, we make no attempt to block new shared
   651          * locks once there is an exclusive waiter.
  652          */
  653         for (;;) {
  654                 x = sx->sx_lock;
  655 
  656                 /*
  657                  * If no other thread has an exclusive lock then try to bump up
  658                  * the count of sharers.  Since we have to preserve the state
  659                  * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
  660                  * shared lock loop back and retry.
  661                  */
  662                 if (x & SX_LOCK_SHARED) {
  663                         MPASS(!(x & SX_LOCK_SHARED_WAITERS));
  664                         if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
  665                             x + SX_ONE_SHARER)) {
  666 #ifdef LOCK_PROFILING_SHARED
  667                                 if (SX_SHARERS(x) == 0)
  668                                         lock_profile_obtain_lock_success(
  669                                             &sx->lock_object, contested,
  670                                             waittime, file, line);
  671 #endif
  672                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  673                                         CTR4(KTR_LOCK,
  674                                             "%s: %p succeed %p -> %p", __func__,
  675                                             sx, (void *)x,
  676                                             (void *)(x + SX_ONE_SHARER));
  677                                 break;
  678                         }
  679                         continue;
  680                 }
  681 
  682 #ifdef ADAPTIVE_SX
  683                 /*
  684                  * If the owner is running on another CPU, spin until
  685                  * the owner stops running or the state of the lock
  686                  * changes.
  687                  */
  688                 else if (sx->lock_object.lo_flags & SX_ADAPTIVESPIN) {
  689                         x = SX_OWNER(x);
  690                         owner = (struct thread *)x;
  691                         if (TD_IS_RUNNING(owner)) {
  692                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  693                                         CTR3(KTR_LOCK,
  694                                             "%s: spinning on %p held by %p",
  695                                             __func__, sx, owner);
  696                                 GIANT_SAVE();
  697 #ifdef LOCK_PROFILING_SHARED
  698                                 lock_profile_obtain_lock_failed(
  699                                     &sx->lock_object, &contested, &waittime);
  700 #endif
  701                                 while (SX_OWNER(sx->sx_lock) == x &&
  702                                     TD_IS_RUNNING(owner))
  703                                         cpu_spinwait();
  704                                 continue;
  705                         }
  706                 }
  707 #endif
  708 
  709                 /*
  710                  * Some other thread already has an exclusive lock, so
  711                  * start the process of blocking.
  712                  */
  713                 sleepq_lock(&sx->lock_object);
  714                 x = sx->sx_lock;
  715 
  716                 /*
  717                  * The lock could have been released while we spun.
  718                  * In this case loop back and retry.
  719                  */
  720                 if (x & SX_LOCK_SHARED) {
  721                         sleepq_release(&sx->lock_object);
  722                         continue;
  723                 }
  724 
  725 #ifdef ADAPTIVE_SX
  726                 /*
  727                  * If the owner is running on another CPU, spin until
  728                  * the owner stops running or the state of the lock
  729                  * changes.
  730                  */
  731                 if (!(x & SX_LOCK_SHARED) &&
  732                     (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
  733                         owner = (struct thread *)SX_OWNER(x);
  734                         if (TD_IS_RUNNING(owner)) {
  735                                 sleepq_release(&sx->lock_object);
  736                                 continue;
  737                         }
  738                 }
  739 #endif
  740 
  741                 /*
  742                  * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
  743                  * fail to set it drop the sleep queue lock and loop
  744                  * back.
  745                  */
  746                 if (!(x & SX_LOCK_SHARED_WAITERS)) {
  747                         if (!atomic_cmpset_ptr(&sx->sx_lock, x,
  748                             x | SX_LOCK_SHARED_WAITERS)) {
  749                                 sleepq_release(&sx->lock_object);
  750                                 continue;
  751                         }
  752                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  753                                 CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
  754                                     __func__, sx);
  755                 }
  756 
  757                 /*
  758                  * Since we have been unable to acquire the shared lock,
  759                  * we have to sleep.
  760                  */
  761                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  762                         CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
  763                             __func__, sx);
  764 
  765                 GIANT_SAVE();
  766 #ifdef LOCK_PROFILING_SHARED
  767                 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
  768                     &waittime);
  769 #endif
  770                 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
  771                     SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
  772                     SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
  773                 if (!(opts & SX_INTERRUPTIBLE))
  774                         sleepq_wait(&sx->lock_object);
  775                 else
  776                         error = sleepq_wait_sig(&sx->lock_object);
  777 
  778                 if (error) {
  779                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
  780                                 CTR2(KTR_LOCK,
  781                         "%s: interruptible sleep by %p suspended by signal",
  782                                     __func__, sx);
  783                         break;
  784                 }
  785                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  786                         CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
  787                             __func__, sx);
  788         }
  789 
  790         GIANT_RESTORE();
  791         return (error);
  792 }
  793 
  794 /*
  795  * This function represents the so-called 'hard case' for sx_sunlock
  796  * operation.  All 'easy case' failures are redirected to this.  Note
  797  * that ideally this would be a static function, but it needs to be
  798  * accessible from at least sx.h.
  799  */
  800 void
  801 _sx_sunlock_hard(struct sx *sx, const char *file, int line)
  802 {
  803         uintptr_t x;
  804 
  805         for (;;) {
  806                 x = sx->sx_lock;
  807 
  808                 /*
   809                  * We should never have waiting sharers while at least
   810                  * one thread already holds a shared lock.
  811                  */
  812                 KASSERT(!(x & SX_LOCK_SHARED_WAITERS),
  813                     ("%s: waiting sharers", __func__));
  814 
  815                 /*
  816                  * See if there is more than one shared lock held.  If
  817                  * so, just drop one and return.
  818                  */
  819                 if (SX_SHARERS(x) > 1) {
  820                         if (atomic_cmpset_ptr(&sx->sx_lock, x,
  821                             x - SX_ONE_SHARER)) {
  822                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  823                                         CTR4(KTR_LOCK,
  824                                             "%s: %p succeeded %p -> %p",
  825                                             __func__, sx, (void *)x,
  826                                             (void *)(x - SX_ONE_SHARER));
  827                                 break;
  828                         }
  829                         continue;
  830                 }
  831 
  832                 /*
  833                  * If there aren't any waiters for an exclusive lock,
  834                  * then try to drop it quickly.
  835                  */
  836                 if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
  837                         MPASS(x == SX_SHARERS_LOCK(1));
  838                         if (atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1),
  839                             SX_LOCK_UNLOCKED)) {
  840                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
  841                                         CTR2(KTR_LOCK, "%s: %p last succeeded",
  842                                             __func__, sx);
  843                                 break;
  844                         }
  845                         continue;
  846                 }
  847 
  848                 /*
  849                  * At this point, there should just be one sharer with
  850                  * exclusive waiters.
  851                  */
  852                 MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));
  853 
  854                 sleepq_lock(&sx->lock_object);
  855 
  856                 /*
   857                  * The wakeup semantics here are simple: just wake up
   858                  * all of the exclusive waiters.  Note that the state
   859                  * of the lock could have changed, so if the cmpset
   860                  * fails, loop back and retry.
  861                  */
  862                 if (!atomic_cmpset_ptr(&sx->sx_lock,
  863                     SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
  864                     SX_LOCK_UNLOCKED)) {
  865                         sleepq_release(&sx->lock_object);
  866                         continue;
  867                 }
  868                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
   869                         CTR2(KTR_LOCK, "%s: %p waking up all threads on "
   870                             "exclusive queue", __func__, sx);
  871                 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1,
  872                     SQ_EXCLUSIVE_QUEUE);
  873                 break;
  874         }
  875 }
  876 
  877 #ifdef INVARIANT_SUPPORT
  878 #ifndef INVARIANTS
  879 #undef  _sx_assert
  880 #endif
  881 
  882 /*
  883  * In the non-WITNESS case, sx_assert() can only detect that at least
  884  * *some* thread owns an slock, but it cannot guarantee that *this*
  885  * thread owns an slock.
  886  */
  887 void
  888 _sx_assert(struct sx *sx, int what, const char *file, int line)
  889 {
  890 #ifndef WITNESS
  891         int slocked = 0;
  892 #endif
  893 
  894         if (panicstr != NULL)
  895                 return;
  896         switch (what) {
  897         case SA_SLOCKED:
  898         case SA_SLOCKED | SA_NOTRECURSED:
  899         case SA_SLOCKED | SA_RECURSED:
  900 #ifndef WITNESS
  901                 slocked = 1;
  902                 /* FALLTHROUGH */
  903 #endif
  904         case SA_LOCKED:
  905         case SA_LOCKED | SA_NOTRECURSED:
  906         case SA_LOCKED | SA_RECURSED:
  907 #ifdef WITNESS
  908                 witness_assert(&sx->lock_object, what, file, line);
  909 #else
  910                 /*
  911                  * If some other thread has an exclusive lock or we
  912                  * have one and are asserting a shared lock, fail.
  913                  * Also, if no one has a lock at all, fail.
  914                  */
  915                 if (sx->sx_lock == SX_LOCK_UNLOCKED ||
  916                     (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
  917                     sx_xholder(sx) != curthread)))
  918                         panic("Lock %s not %slocked @ %s:%d\n",
  919                             sx->lock_object.lo_name, slocked ? "share " : "",
  920                             file, line);
  921 
  922                 if (!(sx->sx_lock & SX_LOCK_SHARED)) {
  923                         if (sx_recursed(sx)) {
  924                                 if (what & SA_NOTRECURSED)
  925                                         panic("Lock %s recursed @ %s:%d\n",
  926                                             sx->lock_object.lo_name, file,
  927                                             line);
  928                         } else if (what & SA_RECURSED)
  929                                 panic("Lock %s not recursed @ %s:%d\n",
  930                                     sx->lock_object.lo_name, file, line);
  931                 }
  932 #endif
  933                 break;
  934         case SA_XLOCKED:
  935         case SA_XLOCKED | SA_NOTRECURSED:
  936         case SA_XLOCKED | SA_RECURSED:
  937                 if (sx_xholder(sx) != curthread)
  938                         panic("Lock %s not exclusively locked @ %s:%d\n",
  939                             sx->lock_object.lo_name, file, line);
  940                 if (sx_recursed(sx)) {
  941                         if (what & SA_NOTRECURSED)
  942                                 panic("Lock %s recursed @ %s:%d\n",
  943                                     sx->lock_object.lo_name, file, line);
  944                 } else if (what & SA_RECURSED)
  945                         panic("Lock %s not recursed @ %s:%d\n",
  946                             sx->lock_object.lo_name, file, line);
  947                 break;
  948         case SA_UNLOCKED:
  949 #ifdef WITNESS
  950                 witness_assert(&sx->lock_object, what, file, line);
  951 #else
  952                 /*
   953                  * If we hold an exclusive lock, fail.  We can't
  954                  * reliably check to see if we hold a shared lock or
  955                  * not.
  956                  */
  957                 if (sx_xholder(sx) == curthread)
  958                         panic("Lock %s exclusively locked @ %s:%d\n",
  959                             sx->lock_object.lo_name, file, line);
  960 #endif
  961                 break;
  962         default:
  963                 panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
  964                     line);
  965         }
  966 }
  967 #endif  /* INVARIANT_SUPPORT */
  968 
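Consumers invoke this through the sx_assert() macro, which compiles away when
INVARIANTS is not enabled, so assertions cost nothing in production kernels.
A hypothetical use in a function that requires its caller to hold the lock
exclusively:

        static void
        foo_modify_locked(void)
        {

                sx_assert(&foo_lock, SA_XLOCKED);
                foo_count++;
        }
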
  969 #ifdef DDB
  970 static void
  971 db_show_sx(struct lock_object *lock)
  972 {
  973         struct thread *td;
  974         struct sx *sx;
  975 
  976         sx = (struct sx *)lock;
  977 
  978         db_printf(" state: ");
  979         if (sx->sx_lock == SX_LOCK_UNLOCKED)
  980                 db_printf("UNLOCKED\n");
  981         else if (sx->sx_lock == SX_LOCK_DESTROYED) {
  982                 db_printf("DESTROYED\n");
  983                 return;
  984         } else if (sx->sx_lock & SX_LOCK_SHARED)
  985                 db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
  986         else {
  987                 td = sx_xholder(sx);
  988                 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
  989                     td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
  990                 if (sx_recursed(sx))
  991                         db_printf(" recursed: %d\n", sx->sx_recurse);
  992         }
  993 
  994         db_printf(" waiters: ");
  995         switch(sx->sx_lock &
  996             (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
  997         case SX_LOCK_SHARED_WAITERS:
  998                 db_printf("shared\n");
  999                 break;
 1000         case SX_LOCK_EXCLUSIVE_WAITERS:
 1001                 db_printf("exclusive\n");
 1002                 break;
 1003         case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
 1004                 db_printf("exclusive and shared\n");
 1005                 break;
 1006         default:
 1007                 db_printf("none\n");
 1008         }
 1009 }
 1010 
 1011 /*
 1012  * Check to see if a thread that is blocked on a sleep queue is actually
 1013  * blocked on an sx lock.  If so, output some details and return true.
 1014  * If the lock has an exclusive owner, return that in *ownerp.
 1015  */
 1016 int
 1017 sx_chain(struct thread *td, struct thread **ownerp)
 1018 {
 1019         struct sx *sx;
 1020 
 1021         /*
 1022          * Check to see if this thread is blocked on an sx lock.
 1023          * First, we check the lock class.  If that is ok, then we
 1024          * compare the lock name against the wait message.
 1025          */
 1026         sx = td->td_wchan;
 1027         if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
 1028             sx->lock_object.lo_name != td->td_wmesg)
 1029                 return (0);
 1030 
 1031         /* We think we have an sx lock, so output some details. */
 1032         db_printf("blocked on sx \"%s\" ", td->td_wmesg);
 1033         *ownerp = sx_xholder(sx);
 1034         if (sx->sx_lock & SX_LOCK_SHARED)
 1035                 db_printf("SLOCK (count %ju)\n",
 1036                     (uintmax_t)SX_SHARERS(sx->sx_lock));
 1037         else
 1038                 db_printf("XLOCK\n");
 1039         return (1);
 1040 }
 1041 #endif
