FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lock.c


    1 /*-
    2  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice(s), this list of conditions and the following disclaimer as
   10  *    the first lines of this file unmodified other than the possible
   11  *    addition of one or more copyright notices.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice(s), this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
   17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
   20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
   23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
   26  * DAMAGE.
   27  */
   28 
   29 #include "opt_adaptive_lockmgrs.h"
   30 #include "opt_ddb.h"
   31 #include "opt_kdtrace.h"
   32 
   33 #include <sys/cdefs.h>
   34 __FBSDID("$FreeBSD: releng/8.3/sys/kern/kern_lock.c 224704 2011-08-08 08:22:15Z kib $");
   35 
   36 #include <sys/param.h>
   37 #include <sys/ktr.h>
   38 #include <sys/lock.h>
   39 #include <sys/lock_profile.h>
   40 #include <sys/lockmgr.h>
   41 #include <sys/mutex.h>
   42 #include <sys/proc.h>
   43 #include <sys/sleepqueue.h>
   44 #ifdef DEBUG_LOCKS
   45 #include <sys/stack.h>
   46 #endif
   47 #include <sys/sysctl.h>
   48 #include <sys/systm.h>
   49 
   50 #include <machine/cpu.h>
   51 
   52 #ifdef DDB
   53 #include <ddb/ddb.h>
   54 #endif
   55 
   56 CTASSERT(((LK_ADAPTIVE | LK_EXSLPFAIL | LK_NOSHARE) & LO_CLASSFLAGS) ==
   57     (LK_ADAPTIVE | LK_EXSLPFAIL | LK_NOSHARE));
   58 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
   59     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
   60 
   61 #define SQ_EXCLUSIVE_QUEUE      0
   62 #define SQ_SHARED_QUEUE         1
   63 
   64 #ifdef ADAPTIVE_LOCKMGRS
   65 #define ALK_RETRIES             10
   66 #define ALK_LOOPS               10000
   67 #endif
   68 
   69 #ifndef INVARIANTS
   70 #define _lockmgr_assert(lk, what, file, line)
   71 #define TD_LOCKS_INC(td)
   72 #define TD_LOCKS_DEC(td)
   73 #else
   74 #define TD_LOCKS_INC(td)        ((td)->td_locks++)
   75 #define TD_LOCKS_DEC(td)        ((td)->td_locks--)
   76 #endif
   77 #define TD_SLOCKS_INC(td)       ((td)->td_lk_slocks++)
   78 #define TD_SLOCKS_DEC(td)       ((td)->td_lk_slocks--)
   79 
   80 #ifndef DEBUG_LOCKS
   81 #define STACK_PRINT(lk)
   82 #define STACK_SAVE(lk)
   83 #define STACK_ZERO(lk)
   84 #else
   85 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
   86 #define STACK_SAVE(lk)  stack_save(&(lk)->lk_stack)
   87 #define STACK_ZERO(lk)  stack_zero(&(lk)->lk_stack)
   88 #endif
   89 
   90 #define LOCK_LOG2(lk, string, arg1, arg2)                               \
   91         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
   92                 CTR2(KTR_LOCK, (string), (arg1), (arg2))
   93 #define LOCK_LOG3(lk, string, arg1, arg2, arg3)                         \
   94         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
   95                 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
   96 
   97 #define GIANT_DECLARE                                                   \
   98         int _i = 0;                                                     \
   99         WITNESS_SAVE_DECL(Giant)
  100 #define GIANT_RESTORE() do {                                            \
  101         if (_i > 0) {                                                   \
  102                 while (_i--)                                            \
  103                         mtx_lock(&Giant);                               \
  104                 WITNESS_RESTORE(&Giant.lock_object, Giant);             \
  105         }                                                               \
  106 } while (0)
  107 #define GIANT_SAVE() do {                                               \
  108         if (mtx_owned(&Giant)) {                                        \
  109                 WITNESS_SAVE(&Giant.lock_object, Giant);                \
  110                 while (mtx_owned(&Giant)) {                             \
  111                         _i++;                                           \
  112                         mtx_unlock(&Giant);                             \
  113                 }                                                       \
  114         }                                                               \
  115 } while (0)
  116 
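/*
 * Editor's illustrative sketch, not part of the original kern_lock.c: the two
 * macros above are meant to bracket any point that may sleep.  GIANT_SAVE()
 * counts in _i how many times Giant is recursively held and drops it fully;
 * GIANT_RESTORE() then reacquires it the same number of times.  A caller uses
 * them roughly as follows:
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();
 *	sleepq_wait(&lk->lock_object, pri);
 *	GIANT_RESTORE();
 */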
  117 #define LK_CAN_SHARE(x)                                                 \
  118         (((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||      \
  119         ((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||                           \
  120         curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
  121 #define LK_TRYOP(x)                                                     \
  122         ((x) & LK_NOWAIT)
  123 
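/*
 * Editor's note, not in the original source: as written, LK_CAN_SHARE()
 * succeeds only for a share-held lock, and then only if at least one of the
 * exclusive waiter/spinner bits is clear, the caller already holds shared
 * lockmgr locks, or the caller has TDP_DEADLKTREAT set.  For example, a state
 * word of LK_SHARERS_LOCK(2) with no waiter bits passes the test, and a new
 * reader simply bumps the sharer count:
 *
 *	atomic_cmpset_acq_ptr(&lk->lk_lock, x, x + LK_ONE_SHARER);
 */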
  124 #define LK_CAN_WITNESS(x)                                               \
  125         (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
  126 #define LK_TRYWIT(x)                                                    \
  127         (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
  128 
  129 #define LK_CAN_ADAPT(lk, f)                                             \
  130         (((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&             \
  131         ((f) & LK_SLEEPFAIL) == 0)
  132 
  133 #define lockmgr_disowned(lk)                                            \
  134         (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
  135 
  136 #define lockmgr_xlocked(lk)                                             \
  137         (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
  138 
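/*
 * Editor's note, not in the original source: lk_lock is a single word that
 * encodes either the exclusive owner or the sharer count.  When LK_SHARE is
 * clear, the word (once the flag bits masked above are ignored) is the owning
 * thread pointer, or LK_KERNPROC if the lock has been disowned; when LK_SHARE
 * is set, it holds LK_SHARERS_LOCK(n) for n concurrent readers.  Ignoring the
 * waiter/spinner bits, for instance:
 *
 *	lk->lk_lock == (uintptr_t)curthread	exclusively owned by curthread
 *	lk->lk_lock == LK_SHARERS_LOCK(3)	held shared by three threads
 *	lk->lk_lock == LK_UNLOCKED		free
 */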
  139 static void      assert_lockmgr(struct lock_object *lock, int how);
  140 #ifdef DDB
  141 static void      db_show_lockmgr(struct lock_object *lock);
  142 #endif
  143 static void      lock_lockmgr(struct lock_object *lock, int how);
  144 #ifdef KDTRACE_HOOKS
  145 static int       owner_lockmgr(struct lock_object *lock, struct thread **owner);
  146 #endif
  147 static int       unlock_lockmgr(struct lock_object *lock);
  148 
  149 struct lock_class lock_class_lockmgr = {
  150         .lc_name = "lockmgr",
  151         .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
  152         .lc_assert = assert_lockmgr,
  153 #ifdef DDB
  154         .lc_ddb_show = db_show_lockmgr,
  155 #endif
  156         .lc_lock = lock_lockmgr,
  157         .lc_unlock = unlock_lockmgr,
  158 #ifdef KDTRACE_HOOKS
  159         .lc_owner = owner_lockmgr,
  160 #endif
  161 };
  162 
  163 static __inline struct thread *
  164 lockmgr_xholder(struct lock *lk)
  165 {
  166         uintptr_t x;
  167 
  168         x = lk->lk_lock;
  169         return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
  170 }
  171 
  172 /*
   173  * Assumes the sleepqueue chain lock is held and returns with it released.
   174  * It also assumes the generic interlock is sane and was previously checked.
  175  * If LK_INTERLOCK is specified the interlock is not reacquired after the
  176  * sleep.
  177  */
  178 static __inline int
  179 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
  180     const char *wmesg, int pri, int timo, int queue)
  181 {
  182         GIANT_DECLARE;
  183         struct lock_class *class;
  184         int catch, error;
  185 
  186         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
  187         catch = pri & PCATCH;
  188         pri &= PRIMASK;
  189         error = 0;
  190 
  191         LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
  192             (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
  193 
  194         if (flags & LK_INTERLOCK)
  195                 class->lc_unlock(ilk);
  196 
  197         /*
   198          * LK_EXSLPFAIL is not invariant across the whole lock operation, but
   199          * it is always protected by the sleepqueue spinlock, so it is safe to
   200          * handle it within lo_flags.
  201          */
  202         if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
  203                 lk->lock_object.lo_flags |= LK_EXSLPFAIL;
  204         GIANT_SAVE();
  205         sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
  206             SLEEPQ_INTERRUPTIBLE : 0), queue);
  207         if ((flags & LK_TIMELOCK) && timo)
  208                 sleepq_set_timeout(&lk->lock_object, timo);
  209 
  210         /*
   211          * Decide which sleepq_wait() variant to use for the actual sleep.
  212          */
  213         if ((flags & LK_TIMELOCK) && timo && catch)
  214                 error = sleepq_timedwait_sig(&lk->lock_object, pri);
  215         else if ((flags & LK_TIMELOCK) && timo)
  216                 error = sleepq_timedwait(&lk->lock_object, pri);
  217         else if (catch)
  218                 error = sleepq_wait_sig(&lk->lock_object, pri);
  219         else
  220                 sleepq_wait(&lk->lock_object, pri);
  221         GIANT_RESTORE();
  222         if ((flags & LK_SLEEPFAIL) && error == 0)
  223                 error = ENOLCK;
  224 
  225         return (error);
  226 }
  227 
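/*
 * Editor's illustrative sketch, not part of the original file: the callers of
 * sleeplk() below in __lockmgr_args() all follow the same outline.  The
 * sleepqueue chain lock is held, the relevant waiters bit has already been
 * published, and LK_INTERLOCK handling is left to sleeplk():
 *
 *	sleepq_lock(&lk->lock_object);
 *	x = lk->lk_lock;
 *	... set LK_SHARED_WAITERS or LK_EXCLUSIVE_WAITERS with a cmpset ...
 *	error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo, SQ_SHARED_QUEUE);
 *	flags &= ~LK_INTERLOCK;
 */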
  228 static __inline int
  229 wakeupshlk(struct lock *lk, const char *file, int line)
  230 {
  231         uintptr_t v, x;
  232         u_int realexslp;
  233         int queue, wakeup_swapper;
  234 
  235         TD_LOCKS_DEC(curthread);
  236         TD_SLOCKS_DEC(curthread);
  237         WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
  238         LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
  239 
  240         wakeup_swapper = 0;
  241         for (;;) {
  242                 x = lk->lk_lock;
  243 
  244                 /*
  245                  * If there is more than one shared lock held, just drop one
  246                  * and return.
  247                  */
  248                 if (LK_SHARERS(x) > 1) {
  249                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
  250                             x - LK_ONE_SHARER))
  251                                 break;
  252                         continue;
  253                 }
  254 
  255                 /*
   256                  * If there are no waiters on either queue, drop the
  257                  * lock quickly.
  258                  */
  259                 if ((x & LK_ALL_WAITERS) == 0) {
  260                         MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
  261                             LK_SHARERS_LOCK(1));
  262                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
  263                                 break;
  264                         continue;
  265                 }
  266 
  267                 /*
  268                  * We should have a sharer with waiters, so enter the hard
  269                  * path in order to handle wakeups correctly.
  270                  */
  271                 sleepq_lock(&lk->lock_object);
  272                 x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
  273                 v = LK_UNLOCKED;
  274 
  275                 /*
  276                  * If the lock has exclusive waiters, give them preference in
   277                  * order to avoid a deadlock with shared runners-up.
   278                  * If interruptible sleeps left the exclusive queue empty,
   279                  * avoid starvation of the threads sleeping on the shared
  280                  * queue by giving them precedence and cleaning up the
  281                  * exclusive waiters bit anyway.
  282                  * Please note that the LK_EXSLPFAIL flag may be lying about
  283                  * the real presence of waiters with the LK_SLEEPFAIL flag on
   284                  * because they may be used in conjunction with interruptible
  285                  * sleeps.
  286                  */
  287                 realexslp = sleepq_sleepcnt(&lk->lock_object,
  288                     SQ_EXCLUSIVE_QUEUE);
  289                 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
  290                         if ((lk->lock_object.lo_flags & LK_EXSLPFAIL) == 0) {
  291                                 lk->lock_object.lo_flags &= ~LK_EXSLPFAIL;
  292                                 queue = SQ_EXCLUSIVE_QUEUE;
  293                                 v |= (x & LK_SHARED_WAITERS);
  294                         } else {
  295                                 lk->lock_object.lo_flags &= ~LK_EXSLPFAIL;
  296                                 LOCK_LOG2(lk,
  297                                     "%s: %p has only LK_SLEEPFAIL sleepers",
  298                                     __func__, lk);
  299                                 LOCK_LOG2(lk,
  300                             "%s: %p waking up threads on the exclusive queue",
  301                                     __func__, lk);
  302                                 wakeup_swapper =
  303                                     sleepq_broadcast(&lk->lock_object,
  304                                     SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
  305                                 queue = SQ_SHARED_QUEUE;
  306                         }
  307                                 
  308                 } else {
  309 
  310                         /*
  311                          * Exclusive waiters sleeping with LK_SLEEPFAIL on
  312                          * and using interruptible sleeps/timeout may have
   313                          * left a spurious LK_EXSLPFAIL flag on, so clean
  314                          * it up anyway.
  315                          */
  316                         lk->lock_object.lo_flags &= ~LK_EXSLPFAIL;
  317                         queue = SQ_SHARED_QUEUE;
  318                 }
  319 
  320                 if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
  321                     v)) {
  322                         sleepq_release(&lk->lock_object);
  323                         continue;
  324                 }
  325                 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
  326                     __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
  327                     "exclusive");
  328                 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
  329                     0, queue);
  330                 sleepq_release(&lk->lock_object);
  331                 break;
  332         }
  333 
  334         lock_profile_release_lock(&lk->lock_object);
  335         return (wakeup_swapper);
  336 }
  337 
  338 static void
  339 assert_lockmgr(struct lock_object *lock, int what)
  340 {
  341 
  342         panic("lockmgr locks do not support assertions");
  343 }
  344 
  345 static void
  346 lock_lockmgr(struct lock_object *lock, int how)
  347 {
  348 
  349         panic("lockmgr locks do not support sleep interlocking");
  350 }
  351 
  352 static int
  353 unlock_lockmgr(struct lock_object *lock)
  354 {
  355 
  356         panic("lockmgr locks do not support sleep interlocking");
  357 }
  358 
  359 #ifdef KDTRACE_HOOKS
  360 static int
  361 owner_lockmgr(struct lock_object *lock, struct thread **owner)
  362 {
  363 
  364         panic("lockmgr locks do not support owner inquiring");
  365 }
  366 #endif
  367 
  368 void
  369 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
  370 {
  371         int iflags;
  372 
  373         MPASS((flags & ~LK_INIT_MASK) == 0);
  374         ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
  375             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
  376             &lk->lk_lock));
  377 
  378         iflags = LO_SLEEPABLE | LO_UPGRADABLE;
  379         if (flags & LK_CANRECURSE)
  380                 iflags |= LO_RECURSABLE;
  381         if ((flags & LK_NODUP) == 0)
  382                 iflags |= LO_DUPOK;
  383         if (flags & LK_NOPROFILE)
  384                 iflags |= LO_NOPROFILE;
  385         if ((flags & LK_NOWITNESS) == 0)
  386                 iflags |= LO_WITNESS;
  387         if (flags & LK_QUIET)
  388                 iflags |= LO_QUIET;
  389         iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
  390 
  391         lk->lk_lock = LK_UNLOCKED;
  392         lk->lk_recurse = 0;
  393         lk->lk_timo = timo;
  394         lk->lk_pri = pri;
  395         lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
  396         STACK_ZERO(lk);
  397 }
  398 
  399 void
  400 lockdestroy(struct lock *lk)
  401 {
  402 
  403         KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
  404         KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
  405         KASSERT((lk->lock_object.lo_flags & LK_EXSLPFAIL) == 0,
  406             ("lockmgr still exclusive waiters"));
  407         lock_destroy(&lk->lock_object);
  408 }
  409 
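/*
 * Editor's illustrative usage sketch, not part of kern_lock.c: a minimal
 * consumer of the lockmgr(9) KPI declared in <sys/lockmgr.h>.  The function
 * and variable names are made up, and the block is compiled out on purpose.
 */
#if 0
static void
lockmgr_example(void)
{
	struct lock example_lk;

	lockinit(&example_lk, PVFS, "exmplk", 0, LK_CANRECURSE);
	if (lockmgr(&example_lk, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
		/* ... exclusive critical section ... */
		lockmgr(&example_lk, LK_RELEASE, NULL);
	}
	lockdestroy(&example_lk);
}
#endif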
  410 int
  411 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
  412     const char *wmesg, int pri, int timo, const char *file, int line)
  413 {
  414         GIANT_DECLARE;
  415         struct lock_class *class;
  416         const char *iwmesg;
  417         uintptr_t tid, v, x;
  418         u_int op, realexslp;
  419         int error, ipri, itimo, queue, wakeup_swapper;
  420 #ifdef LOCK_PROFILING
  421         uint64_t waittime = 0;
  422         int contested = 0;
  423 #endif
  424 #ifdef ADAPTIVE_LOCKMGRS
  425         volatile struct thread *owner;
  426         u_int i, spintries = 0;
  427 #endif
  428 
  429         error = 0;
  430         tid = (uintptr_t)curthread;
  431         op = (flags & LK_TYPE_MASK);
  432         iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
  433         ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
  434         itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
  435 
  436         MPASS((flags & ~LK_TOTAL_MASK) == 0);
  437         KASSERT((op & (op - 1)) == 0,
  438             ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
  439         KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
  440             (op != LK_DOWNGRADE && op != LK_RELEASE),
   441             ("%s: Invalid flags for the requested operation @ %s:%d",
  442             __func__, file, line));
  443         KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
  444             ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
  445             __func__, file, line));
  446 
  447         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
  448         if (panicstr != NULL) {
  449                 if (flags & LK_INTERLOCK)
  450                         class->lc_unlock(ilk);
  451                 return (0);
  452         }
  453 
  454         if (lk->lock_object.lo_flags & LK_NOSHARE) {
  455                 switch (op) {
  456                 case LK_SHARED:
  457                         op = LK_EXCLUSIVE;
  458                         break;
  459                 case LK_UPGRADE:
  460                 case LK_DOWNGRADE:
  461                         _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
  462                             file, line);
  463                         return (0);
  464                 }
  465         }
  466 
  467         wakeup_swapper = 0;
  468         switch (op) {
  469         case LK_SHARED:
  470                 if (LK_CAN_WITNESS(flags))
  471                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
  472                             file, line, ilk);
  473                 for (;;) {
  474                         x = lk->lk_lock;
  475 
  476                         /*
  477                          * If no other thread has an exclusive lock, or
  478                          * no exclusive waiter is present, bump the count of
  479                          * sharers.  Since we have to preserve the state of
  480                          * waiters, if we fail to acquire the shared lock
  481                          * loop back and retry.
  482                          */
  483                         if (LK_CAN_SHARE(x)) {
  484                                 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
  485                                     x + LK_ONE_SHARER))
  486                                         break;
  487                                 continue;
  488                         }
  489                         lock_profile_obtain_lock_failed(&lk->lock_object,
  490                             &contested, &waittime);
  491 
  492                         /*
  493                          * If the lock is already held by curthread in
   494                          * exclusive mode, avoid a deadlock.
  495                          */
  496                         if (LK_HOLDER(x) == tid) {
  497                                 LOCK_LOG2(lk,
  498                                     "%s: %p already held in exclusive mode",
  499                                     __func__, lk);
  500                                 error = EDEADLK;
  501                                 break;
  502                         }
  503 
  504                         /*
   505                          * If the operation is not allowed to sleep, just give up
  506                          * and return.
  507                          */
  508                         if (LK_TRYOP(flags)) {
  509                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
  510                                     __func__, lk);
  511                                 error = EBUSY;
  512                                 break;
  513                         }
  514 
  515 #ifdef ADAPTIVE_LOCKMGRS
  516                         /*
  517                          * If the owner is running on another CPU, spin until
  518                          * the owner stops running or the state of the lock
   519                          * changes.  We need to handle two states here
   520                          * because, for a failed acquisition, the lock can be
  521                          * either held in exclusive mode or shared mode
  522                          * (for the writer starvation avoidance technique).
  523                          */
  524                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  525                             LK_HOLDER(x) != LK_KERNPROC) {
  526                                 owner = (struct thread *)LK_HOLDER(x);
  527                                 if (LOCK_LOG_TEST(&lk->lock_object, 0))
  528                                         CTR3(KTR_LOCK,
  529                                             "%s: spinning on %p held by %p",
  530                                             __func__, lk, owner);
  531 
  532                                 /*
   533                                  * If we are also holding an interlock, drop it
  534                                  * in order to avoid a deadlock if the lockmgr
  535                                  * owner is adaptively spinning on the
  536                                  * interlock itself.
  537                                  */
  538                                 if (flags & LK_INTERLOCK) {
  539                                         class->lc_unlock(ilk);
  540                                         flags &= ~LK_INTERLOCK;
  541                                 }
  542                                 GIANT_SAVE();
  543                                 while (LK_HOLDER(lk->lk_lock) ==
  544                                     (uintptr_t)owner && TD_IS_RUNNING(owner))
  545                                         cpu_spinwait();
  546                                 GIANT_RESTORE();
  547                                 continue;
  548                         } else if (LK_CAN_ADAPT(lk, flags) &&
  549                             (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
  550                             spintries < ALK_RETRIES) {
  551                                 if (flags & LK_INTERLOCK) {
  552                                         class->lc_unlock(ilk);
  553                                         flags &= ~LK_INTERLOCK;
  554                                 }
  555                                 GIANT_SAVE();
  556                                 spintries++;
  557                                 for (i = 0; i < ALK_LOOPS; i++) {
  558                                         if (LOCK_LOG_TEST(&lk->lock_object, 0))
  559                                                 CTR4(KTR_LOCK,
  560                                     "%s: shared spinning on %p with %u and %u",
  561                                                     __func__, lk, spintries, i);
  562                                         x = lk->lk_lock;
  563                                         if ((x & LK_SHARE) == 0 ||
  564                                             LK_CAN_SHARE(x) != 0)
  565                                                 break;
  566                                         cpu_spinwait();
  567                                 }
  568                                 GIANT_RESTORE();
  569                                 if (i != ALK_LOOPS)
  570                                         continue;
  571                         }
  572 #endif
  573 
  574                         /*
  575                          * Acquire the sleepqueue chain lock because we
   576                          * probably will need to manipulate waiters flags.
  577                          */
  578                         sleepq_lock(&lk->lock_object);
  579                         x = lk->lk_lock;
  580 
  581                         /*
   582                          * If the lock can be acquired in shared mode, try
  583                          * again.
  584                          */
  585                         if (LK_CAN_SHARE(x)) {
  586                                 sleepq_release(&lk->lock_object);
  587                                 continue;
  588                         }
  589 
  590 #ifdef ADAPTIVE_LOCKMGRS
  591                         /*
  592                          * The current lock owner might have started executing
  593                          * on another CPU (or the lock could have changed
   594                          * owner) while we were waiting on the sleepqueue
   595                          * chain lock.  If so, drop the sleepqueue lock and try
  596                          * again.
  597                          */
  598                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  599                             LK_HOLDER(x) != LK_KERNPROC) {
  600                                 owner = (struct thread *)LK_HOLDER(x);
  601                                 if (TD_IS_RUNNING(owner)) {
  602                                         sleepq_release(&lk->lock_object);
  603                                         continue;
  604                                 }
  605                         }
  606 #endif
  607 
  608                         /*
  609                          * Try to set the LK_SHARED_WAITERS flag.  If we fail,
  610                          * loop back and retry.
  611                          */
  612                         if ((x & LK_SHARED_WAITERS) == 0) {
  613                                 if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
  614                                     x | LK_SHARED_WAITERS)) {
  615                                         sleepq_release(&lk->lock_object);
  616                                         continue;
  617                                 }
  618                                 LOCK_LOG2(lk, "%s: %p set shared waiters flag",
  619                                     __func__, lk);
  620                         }
  621 
  622                         /*
   623                          * Since we have been unable to acquire the
  624                          * shared lock and the shared waiters flag is set,
  625                          * we will sleep.
  626                          */
  627                         error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
  628                             SQ_SHARED_QUEUE);
  629                         flags &= ~LK_INTERLOCK;
  630                         if (error) {
  631                                 LOCK_LOG3(lk,
  632                                     "%s: interrupted sleep for %p with %d",
  633                                     __func__, lk, error);
  634                                 break;
  635                         }
  636                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
  637                             __func__, lk);
  638                 }
  639                 if (error == 0) {
  640                         lock_profile_obtain_lock_success(&lk->lock_object,
  641                             contested, waittime, file, line);
  642                         LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
  643                             line);
  644                         WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
  645                             line);
  646                         TD_LOCKS_INC(curthread);
  647                         TD_SLOCKS_INC(curthread);
  648                         STACK_SAVE(lk);
  649                 }
  650                 break;
  651         case LK_UPGRADE:
  652                 _lockmgr_assert(lk, KA_SLOCKED, file, line);
  653                 v = lk->lk_lock;
  654                 x = v & LK_ALL_WAITERS;
  655                 v &= LK_EXCLUSIVE_SPINNERS;
  656 
  657                 /*
  658                  * Try to switch from one shared lock to an exclusive one.
  659                  * We need to preserve waiters flags during the operation.
  660                  */
  661                 if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
  662                     tid | x)) {
  663                         LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
  664                             line);
  665                         WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
  666                             LK_TRYWIT(flags), file, line);
  667                         TD_SLOCKS_DEC(curthread);
  668                         break;
  669                 }
  670 
  671                 /*
   672                  * We have been unable to upgrade, so just
  673                  * give up the shared lock.
  674                  */
  675                 wakeup_swapper |= wakeupshlk(lk, file, line);
  676 
  677                 /* FALLTHROUGH */
  678         case LK_EXCLUSIVE:
  679                 if (LK_CAN_WITNESS(flags))
  680                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
  681                             LOP_EXCLUSIVE, file, line, ilk);
  682 
  683                 /*
  684                  * If curthread already holds the lock and this one is
  685                  * allowed to recurse, simply recurse on it.
  686                  */
  687                 if (lockmgr_xlocked(lk)) {
  688                         if ((flags & LK_CANRECURSE) == 0 &&
  689                             (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
  690 
  691                                 /*
   692                                  * If this is a try operation, just give up
   693                                  * and return instead of panicking.
  694                                  */
  695                                 if (LK_TRYOP(flags)) {
  696                                         LOCK_LOG2(lk,
  697                                             "%s: %p fails the try operation",
  698                                             __func__, lk);
  699                                         error = EBUSY;
  700                                         break;
  701                                 }
  702                                 if (flags & LK_INTERLOCK)
  703                                         class->lc_unlock(ilk);
  704                 panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
  705                                     __func__, iwmesg, file, line);
  706                         }
  707                         lk->lk_recurse++;
  708                         LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
  709                         LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
  710                             lk->lk_recurse, file, line);
  711                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
  712                             LK_TRYWIT(flags), file, line);
  713                         TD_LOCKS_INC(curthread);
  714                         break;
  715                 }
  716 
  717                 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
  718                     tid)) {
  719                         lock_profile_obtain_lock_failed(&lk->lock_object,
  720                             &contested, &waittime);
  721 
  722                         /*
   723                          * If the operation is not allowed to sleep, just give up
  724                          * and return.
  725                          */
  726                         if (LK_TRYOP(flags)) {
  727                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
  728                                     __func__, lk);
  729                                 error = EBUSY;
  730                                 break;
  731                         }
  732 
  733 #ifdef ADAPTIVE_LOCKMGRS
  734                         /*
  735                          * If the owner is running on another CPU, spin until
  736                          * the owner stops running or the state of the lock
  737                          * changes.
  738                          */
  739                         x = lk->lk_lock;
  740                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  741                             LK_HOLDER(x) != LK_KERNPROC) {
  742                                 owner = (struct thread *)LK_HOLDER(x);
  743                                 if (LOCK_LOG_TEST(&lk->lock_object, 0))
  744                                         CTR3(KTR_LOCK,
  745                                             "%s: spinning on %p held by %p",
  746                                             __func__, lk, owner);
  747 
  748                                 /*
   749                                  * If we are also holding an interlock, drop it
  750                                  * in order to avoid a deadlock if the lockmgr
  751                                  * owner is adaptively spinning on the
  752                                  * interlock itself.
  753                                  */
  754                                 if (flags & LK_INTERLOCK) {
  755                                         class->lc_unlock(ilk);
  756                                         flags &= ~LK_INTERLOCK;
  757                                 }
  758                                 GIANT_SAVE();
  759                                 while (LK_HOLDER(lk->lk_lock) ==
  760                                     (uintptr_t)owner && TD_IS_RUNNING(owner))
  761                                         cpu_spinwait();
  762                                 GIANT_RESTORE();
  763                                 continue;
  764                         } else if (LK_CAN_ADAPT(lk, flags) &&
  765                             (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
  766                             spintries < ALK_RETRIES) {
  767                                 if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
  768                                     !atomic_cmpset_ptr(&lk->lk_lock, x,
  769                                     x | LK_EXCLUSIVE_SPINNERS))
  770                                         continue;
  771                                 if (flags & LK_INTERLOCK) {
  772                                         class->lc_unlock(ilk);
  773                                         flags &= ~LK_INTERLOCK;
  774                                 }
  775                                 GIANT_SAVE();
  776                                 spintries++;
  777                                 for (i = 0; i < ALK_LOOPS; i++) {
  778                                         if (LOCK_LOG_TEST(&lk->lock_object, 0))
  779                                                 CTR4(KTR_LOCK,
  780                                     "%s: shared spinning on %p with %u and %u",
  781                                                     __func__, lk, spintries, i);
  782                                         if ((lk->lk_lock &
  783                                             LK_EXCLUSIVE_SPINNERS) == 0)
  784                                                 break;
  785                                         cpu_spinwait();
  786                                 }
  787                                 GIANT_RESTORE();
  788                                 if (i != ALK_LOOPS)
  789                                         continue;
  790                         }
  791 #endif
  792 
  793                         /*
  794                          * Acquire the sleepqueue chain lock because we
   795                          * probably will need to manipulate waiters flags.
  796                          */
  797                         sleepq_lock(&lk->lock_object);
  798                         x = lk->lk_lock;
  799 
  800                         /*
   801                          * If the lock has been released while we spun on
   802                          * the sleepqueue chain lock, just try again.
  803                          */
  804                         if (x == LK_UNLOCKED) {
  805                                 sleepq_release(&lk->lock_object);
  806                                 continue;
  807                         }
  808 
  809 #ifdef ADAPTIVE_LOCKMGRS
  810                         /*
  811                          * The current lock owner might have started executing
  812                          * on another CPU (or the lock could have changed
   813                          * owner) while we were waiting on the sleepqueue
   814                          * chain lock.  If so, drop the sleepqueue lock and try
  815                          * again.
  816                          */
  817                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  818                             LK_HOLDER(x) != LK_KERNPROC) {
  819                                 owner = (struct thread *)LK_HOLDER(x);
  820                                 if (TD_IS_RUNNING(owner)) {
  821                                         sleepq_release(&lk->lock_object);
  822                                         continue;
  823                                 }
  824                         }
  825 #endif
  826 
  827                         /*
  828                          * The lock can be in the state where there is a
  829                          * pending queue of waiters, but still no owner.
  830                          * This happens when the lock is contested and an
  831                          * owner is going to claim the lock.
   832                          * If curthread is the one that successfully acquires
   833                          * it, claim lock ownership and return, preserving the
   834                          * waiters flags.
  835                          */
  836                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
  837                         if ((x & ~v) == LK_UNLOCKED) {
  838                                 v &= ~LK_EXCLUSIVE_SPINNERS;
  839                                 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
  840                                     tid | v)) {
  841                                         sleepq_release(&lk->lock_object);
  842                                         LOCK_LOG2(lk,
  843                                             "%s: %p claimed by a new writer",
  844                                             __func__, lk);
  845                                         break;
  846                                 }
  847                                 sleepq_release(&lk->lock_object);
  848                                 continue;
  849                         }
  850 
  851                         /*
  852                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
  853                          * fail, loop back and retry.
  854                          */
  855                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
  856                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
  857                                     x | LK_EXCLUSIVE_WAITERS)) {
  858                                         sleepq_release(&lk->lock_object);
  859                                         continue;
  860                                 }
  861                                 LOCK_LOG2(lk, "%s: %p set excl waiters flag",
  862                                     __func__, lk);
  863                         }
  864 
  865                         /*
   866                          * Since we have been unable to acquire the
  867                          * exclusive lock and the exclusive waiters flag
  868                          * is set, we will sleep.
  869                          */
  870                         error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
  871                             SQ_EXCLUSIVE_QUEUE);
  872                         flags &= ~LK_INTERLOCK;
  873                         if (error) {
  874                                 LOCK_LOG3(lk,
  875                                     "%s: interrupted sleep for %p with %d",
  876                                     __func__, lk, error);
  877                                 break;
  878                         }
  879                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
  880                             __func__, lk);
  881                 }
  882                 if (error == 0) {
  883                         lock_profile_obtain_lock_success(&lk->lock_object,
  884                             contested, waittime, file, line);
  885                         LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
  886                             lk->lk_recurse, file, line);
  887                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
  888                             LK_TRYWIT(flags), file, line);
  889                         TD_LOCKS_INC(curthread);
  890                         STACK_SAVE(lk);
  891                 }
  892                 break;
  893         case LK_DOWNGRADE:
  894                 _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
  895                 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
  896                 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
  897                 TD_SLOCKS_INC(curthread);
  898 
  899                 /*
  900                  * In order to preserve waiters flags, just spin.
  901                  */
  902                 for (;;) {
  903                         x = lk->lk_lock;
  904                         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
  905                         x &= LK_ALL_WAITERS;
  906                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
  907                             LK_SHARERS_LOCK(1) | x))
  908                                 break;
  909                         cpu_spinwait();
  910                 }
  911                 break;
  912         case LK_RELEASE:
  913                 _lockmgr_assert(lk, KA_LOCKED, file, line);
  914                 x = lk->lk_lock;
  915 
  916                 if ((x & LK_SHARE) == 0) {
  917 
  918                         /*
   919                          * As a first option, treat the lock as if it has
   920                          * no waiters.
   921                          * Fix up the tid variable if the lock has been disowned.
  922                          */
  923                         if (LK_HOLDER(x) == LK_KERNPROC)
  924                                 tid = LK_KERNPROC;
  925                         else {
  926                                 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
  927                                     file, line);
  928                                 TD_LOCKS_DEC(curthread);
  929                         }
  930                         LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
  931                             lk->lk_recurse, file, line);
  932 
  933                         /*
  934                          * The lock is held in exclusive mode.
  935                          * If the lock is recursed also, then unrecurse it.
  936                          */
  937                         if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
  938                                 LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
  939                                     lk);
  940                                 lk->lk_recurse--;
  941                                 break;
  942                         }
  943                         if (tid != LK_KERNPROC)
  944                                 lock_profile_release_lock(&lk->lock_object);
  945 
  946                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
  947                             LK_UNLOCKED))
  948                                 break;
  949 
  950                         sleepq_lock(&lk->lock_object);
  951                         x = lk->lk_lock;
  952                         v = LK_UNLOCKED;
  953 
  954                         /*
  955                          * If the lock has exclusive waiters, give them
   956                          * preference in order to avoid a deadlock with
   957                          * shared runners-up.
   958                          * If interruptible sleeps left the exclusive queue
   959                          * empty, avoid starvation of the threads sleeping
  960                          * on the shared queue by giving them precedence
  961                          * and cleaning up the exclusive waiters bit anyway.
  962                          * Please note that the LK_EXSLPFAIL flag may be lying
  963                          * about the real presence of waiters with the
  964                          * LK_SLEEPFAIL flag on because they may be used in
   965                          * conjunction with interruptible sleeps.
  966                          */
  967                         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
  968                         realexslp = sleepq_sleepcnt(&lk->lock_object,
  969                             SQ_EXCLUSIVE_QUEUE);
  970                         if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
  971                                 if ((lk->lock_object.lo_flags &
  972                                     LK_EXSLPFAIL) == 0) {
  973                                         lk->lock_object.lo_flags &=
  974                                             ~LK_EXSLPFAIL;
  975                                         queue = SQ_EXCLUSIVE_QUEUE;
  976                                         v |= (x & LK_SHARED_WAITERS);
  977                                 } else {
  978                                         lk->lock_object.lo_flags &=
  979                                             ~LK_EXSLPFAIL;
  980                                         LOCK_LOG2(lk,
  981                                         "%s: %p has only LK_SLEEPFAIL sleepers",
  982                                             __func__, lk);
  983                                         LOCK_LOG2(lk,
  984                         "%s: %p waking up threads on the exclusive queue",
  985                                             __func__, lk);
  986                                         wakeup_swapper =
  987                                             sleepq_broadcast(&lk->lock_object,
  988                                             SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
  989                                         queue = SQ_SHARED_QUEUE;
  990                                 }
  991                         } else {
  992 
  993                                 /*
  994                                  * Exclusive waiters sleeping with LK_SLEEPFAIL
  995                                  * on and using interruptible sleeps/timeout
   996                                  * may have left a spurious LK_EXSLPFAIL flag
  997                                  * on, so clean it up anyway.
  998                                  */
  999                                 lk->lock_object.lo_flags &= ~LK_EXSLPFAIL;
 1000                                 queue = SQ_SHARED_QUEUE;
 1001                         }
 1002 
 1003                         LOCK_LOG3(lk,
 1004                             "%s: %p waking up threads on the %s queue",
 1005                             __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
 1006                             "exclusive");
 1007                         atomic_store_rel_ptr(&lk->lk_lock, v);
 1008                         wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
 1009                             SLEEPQ_LK, 0, queue);
 1010                         sleepq_release(&lk->lock_object);
 1011                         break;
 1012                 } else
 1013                         wakeup_swapper = wakeupshlk(lk, file, line);
 1014                 break;
 1015         case LK_DRAIN:
 1016                 if (LK_CAN_WITNESS(flags))
 1017                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
 1018                             LOP_EXCLUSIVE, file, line, ilk);
 1019 
 1020                 /*
 1021                  * Trying to drain a lock we already own will result in a
 1022                  * deadlock.
 1023                  */
 1024                 if (lockmgr_xlocked(lk)) {
 1025                         if (flags & LK_INTERLOCK)
 1026                                 class->lc_unlock(ilk);
 1027                         panic("%s: draining %s with the lock held @ %s:%d\n",
 1028                             __func__, iwmesg, file, line);
 1029                 }
 1030 
 1031                 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
 1032                         lock_profile_obtain_lock_failed(&lk->lock_object,
 1033                             &contested, &waittime);
 1034 
 1035                         /*
  1036                          * If the operation is not allowed to sleep, just give up
 1037                          * and return.
 1038                          */
 1039                         if (LK_TRYOP(flags)) {
 1040                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
 1041                                     __func__, lk);
 1042                                 error = EBUSY;
 1043                                 break;
 1044                         }
 1045 
 1046                         /*
 1047                          * Acquire the sleepqueue chain lock because we
  1048                          * probably will need to manipulate waiters flags.
 1049                          */
 1050                         sleepq_lock(&lk->lock_object);
 1051                         x = lk->lk_lock;
 1052 
 1053                         /*
  1054                          * If the lock has been released while we spun on
  1055                          * the sleepqueue chain lock, just try again.
 1056                          */
 1057                         if (x == LK_UNLOCKED) {
 1058                                 sleepq_release(&lk->lock_object);
 1059                                 continue;
 1060                         }
 1061 
 1062                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
 1063                         if ((x & ~v) == LK_UNLOCKED) {
 1064                                 v = (x & ~LK_EXCLUSIVE_SPINNERS);
 1065 
 1066                                 /*
 1067                                  * If interruptible sleeps left the exclusive
  1068                                  * queue empty, avoid starvation of the
 1069                                  * threads sleeping on the shared queue by
 1070                                  * giving them precedence and cleaning up the
 1071                                  * exclusive waiters bit anyway.
 1072                                  * Please note that the LK_EXSLPFAIL flag may
 1073                                  * be lying about the real presence of waiters
 1074                                  * with the LK_SLEEPFAIL flag on because they
  1075                                  * may be used in conjunction with interruptible
 1076                                  * sleeps.
 1077                                  */
 1078                                 if (v & LK_EXCLUSIVE_WAITERS) {
 1079                                         queue = SQ_EXCLUSIVE_QUEUE;
 1080                                         v &= ~LK_EXCLUSIVE_WAITERS;
 1081                                 } else {
 1082 
 1083                                         /*
 1084                                          * Exclusive waiters sleeping with
 1085                                          * LK_SLEEPFAIL on and using
 1086                                          * interruptible sleeps/timeout may
  1087                                          * have left a spurious LK_EXSLPFAIL
 1088                                          * flag on, so clean it up anyway.
 1089                                          */
 1090                                         MPASS(v & LK_SHARED_WAITERS);
 1091                                         lk->lock_object.lo_flags &=
 1092                                             ~LK_EXSLPFAIL;
 1093                                         queue = SQ_SHARED_QUEUE;
 1094                                         v &= ~LK_SHARED_WAITERS;
 1095                                 }
 1096                                 if (queue == SQ_EXCLUSIVE_QUEUE) {
 1097                                         realexslp =
 1098                                             sleepq_sleepcnt(&lk->lock_object,
 1099                                             SQ_EXCLUSIVE_QUEUE);
 1100                                         if ((lk->lock_object.lo_flags &
 1101                                             LK_EXSLPFAIL) == 0) {
 1102                                                 lk->lock_object.lo_flags &=
 1103                                                     ~LK_EXSLPFAIL;
 1104                                                 queue = SQ_SHARED_QUEUE;
 1105                                                 v &= ~LK_SHARED_WAITERS;
 1106                                                 if (realexslp != 0) {
 1107                                                         LOCK_LOG2(lk,
 1108                                         "%s: %p has only LK_SLEEPFAIL sleepers",
 1109                                                             __func__, lk);
 1110                                                         LOCK_LOG2(lk,
 1111                         "%s: %p waking up threads on the exclusive queue",
 1112                                                             __func__, lk);
 1113                                                         wakeup_swapper =
 1114                                                             sleepq_broadcast(
 1115                                                             &lk->lock_object,
 1116                                                             SLEEPQ_LK, 0,
 1117                                                             SQ_EXCLUSIVE_QUEUE);
 1118                                                 }
 1119                                         } else
 1120                                                 lk->lock_object.lo_flags &=
 1121                                                     ~LK_EXSLPFAIL;
 1122                                 }
 1123                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
 1124                                         sleepq_release(&lk->lock_object);
 1125                                         continue;
 1126                                 }
 1127                                 LOCK_LOG3(lk,
 1128                                 "%s: %p waking up all threads on the %s queue",
 1129                                     __func__, lk, queue == SQ_SHARED_QUEUE ?
 1130                                     "shared" : "exclusive");
 1131                                 wakeup_swapper |= sleepq_broadcast(
 1132                                     &lk->lock_object, SLEEPQ_LK, 0, queue);
 1133 
 1134                                 /*
 1135                                  * If shared waiters have been woken up, wait
 1136                                  * for one of them to acquire the lock before
 1137                                  * setting the exclusive waiters flag, in
 1138                                  * order to avoid a deadlock.
 1139                                  */
 1140                                 if (queue == SQ_SHARED_QUEUE) {
 1141                                         for (v = lk->lk_lock;
 1142                                             (v & LK_SHARE) && !LK_SHARERS(v);
 1143                                             v = lk->lk_lock)
 1144                                                 cpu_spinwait();
 1145                                 }
 1146                         }
 1147 
 1148                         /*
 1149                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
 1150                          * fail, loop back and retry.
 1151                          */
 1152                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
 1153                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
 1154                                     x | LK_EXCLUSIVE_WAITERS)) {
 1155                                         sleepq_release(&lk->lock_object);
 1156                                         continue;
 1157                                 }
 1158                                 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
 1159                                     __func__, lk);
 1160                         }
 1161 
 1162                         /*
 1163                          * Since we have been unable to acquire the
 1164                          * exclusive lock and the exclusive waiters flag
 1165                          * is set, we will sleep.
 1166                          */
 1167                         if (flags & LK_INTERLOCK) {
 1168                                 class->lc_unlock(ilk);
 1169                                 flags &= ~LK_INTERLOCK;
 1170                         }
 1171                         GIANT_SAVE();
 1172                         sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
 1173                             SQ_EXCLUSIVE_QUEUE);
 1174                         sleepq_wait(&lk->lock_object, ipri & PRIMASK);
 1175                         GIANT_RESTORE();
 1176                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
 1177                             __func__, lk);
 1178                 }
 1179 
 1180                 if (error == 0) {
 1181                         lock_profile_obtain_lock_success(&lk->lock_object,
 1182                             contested, waittime, file, line);
 1183                         LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
 1184                             lk->lk_recurse, file, line);
 1185                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
 1186                             LK_TRYWIT(flags), file, line);
 1187                         TD_LOCKS_INC(curthread);
 1188                         STACK_SAVE(lk);
 1189                 }
 1190                 break;
 1191         default:
 1192                 if (flags & LK_INTERLOCK)
 1193                         class->lc_unlock(ilk);
 1194                 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
 1195         }
 1196 
 1197         if (flags & LK_INTERLOCK)
 1198                 class->lc_unlock(ilk);
 1199         if (wakeup_swapper)
 1200                 kick_proc0();
 1201 
 1202         return (error);
 1203 }
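
/*
 * Illustrative sketch, not part of kern_lock.c: every name below is an
 * assumption made for the example.  A hypothetical consumer initializes a
 * lockmgr lock together with an interlock and later uses the LK_DRAIN
 * request handled above to quiesce the lock before destroying it;
 * LK_INTERLOCK asks lockmgr() to release the interlock once it is no
 * longer needed (and before sleeping).
 */
static struct mtx example_ilk;          /* assumed interlock */
static struct lock example_lk;          /* assumed lockmgr lock */

static void
example_setup(void)
{

        mtx_init(&example_ilk, "example interlock", NULL, MTX_DEF);
        lockinit(&example_lk, PVFS, "examplk", 0, 0);
}

static void
example_teardown(void)
{

        mtx_lock(&example_ilk);
        /* Sleep until every other holder and waiter has gone away. */
        (void)lockmgr(&example_lk, LK_DRAIN | LK_INTERLOCK, &example_ilk);
        lockmgr(&example_lk, LK_RELEASE, NULL);
        lockdestroy(&example_lk);
        mtx_destroy(&example_ilk);
}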
 1204 
 1205 void
 1206 _lockmgr_disown(struct lock *lk, const char *file, int line)
 1207 {
 1208         uintptr_t tid, x;
 1209 
 1210         tid = (uintptr_t)curthread;
 1211         _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
 1212 
 1213         /*
 1214          * If the owner is already LK_KERNPROC, just skip the whole operation.
 1215          */
 1216         if (LK_HOLDER(lk->lk_lock) != tid)
 1217                 return;
 1218         lock_profile_release_lock(&lk->lock_object);
 1219         LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
 1220         WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
 1221         TD_LOCKS_DEC(curthread);
 1222         STACK_SAVE(lk);
 1223 
 1224         /*
 1225          * In order to preserve waiters flags, just spin.
 1226          */
 1227         for (;;) {
 1228                 x = lk->lk_lock;
 1229                 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
 1230                 x &= LK_ALL_WAITERS;
 1231                 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
 1232                     LK_KERNPROC | x))
 1233                         return;
 1234                 cpu_spinwait();
 1235         }
 1236 }
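
/*
 * Hedged sketch with hypothetical callers, not taken from this file: the
 * disown pattern implemented above.  One context acquires the lock
 * exclusively and hands ownership to LK_KERNPROC so that a different
 * context can release it later, much as the buffer cache does for
 * asynchronous I/O.  The lockmgr_disown() wrapper from <sys/lockmgr.h>
 * is assumed here.
 */
static void
example_start_async(struct lock *lk)
{

        lockmgr(lk, LK_EXCLUSIVE, NULL);
        /* ... hand the locked object off to another thread ... */
        lockmgr_disown(lk);             /* The owner becomes LK_KERNPROC. */
}

static void
example_async_done(struct lock *lk)
{

        /* A lock owned by LK_KERNPROC may be released from any context. */
        lockmgr(lk, LK_RELEASE, NULL);
}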
 1237 
 1238 void
 1239 lockmgr_printinfo(struct lock *lk)
 1240 {
 1241         struct thread *td;
 1242         uintptr_t x;
 1243 
 1244         if (lk->lk_lock == LK_UNLOCKED)
 1245                 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
 1246         else if (lk->lk_lock & LK_SHARE)
 1247                 printf("lock type %s: SHARED (count %ju)\n",
 1248                     lk->lock_object.lo_name,
 1249                     (uintmax_t)LK_SHARERS(lk->lk_lock));
 1250         else {
 1251                 td = lockmgr_xholder(lk);
 1252                 printf("lock type %s: EXCL by thread %p (pid %d)\n",
 1253                     lk->lock_object.lo_name, td, td->td_proc->p_pid);
 1254         }
 1255 
 1256         x = lk->lk_lock;
 1257         if (x & LK_EXCLUSIVE_WAITERS)
 1258                 printf(" with exclusive waiters pending\n");
 1259         if (x & LK_SHARED_WAITERS)
 1260                 printf(" with shared waiters pending\n");
 1261         if (x & LK_EXCLUSIVE_SPINNERS)
 1262                 printf(" with exclusive spinners pending\n");
 1263 
 1264         STACK_PRINT(lk);
 1265 }
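
/*
 * Trivial illustrative sketch (hypothetical caller): dumping the state of
 * a lockmgr lock from diagnostic code via the routine above.
 */
static void
example_dump(struct lock *lk)
{

        printf("state of %s:\n", lk->lock_object.lo_name);
        lockmgr_printinfo(lk);
}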
 1266 
 1267 int
 1268 lockstatus(struct lock *lk)
 1269 {
 1270         uintptr_t v, x;
 1271         int ret;
 1272 
 1273         ret = LK_SHARED;
 1274         x = lk->lk_lock;
 1275         v = LK_HOLDER(x);
 1276 
 1277         if ((x & LK_SHARE) == 0) {
 1278                 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
 1279                         ret = LK_EXCLUSIVE;
 1280                 else
 1281                         ret = LK_EXCLOTHER;
 1282         } else if (x == LK_UNLOCKED)
 1283                 ret = 0;
 1284 
 1285         return (ret);
 1286 }
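
/*
 * Illustrative sketch (hypothetical caller) interpreting the values
 * returned by lockstatus() above.
 */
static void
example_status(struct lock *lk)
{

        switch (lockstatus(lk)) {
        case LK_EXCLUSIVE:
                /* Held exclusively by curthread or disowned to LK_KERNPROC. */
                break;
        case LK_EXCLOTHER:
                /* Held exclusively by some other thread. */
                break;
        case LK_SHARED:
                /* Held in shared mode by one or more threads. */
                break;
        case 0:
                /* Unlocked. */
                break;
        }
}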
 1287 
 1288 #ifdef INVARIANT_SUPPORT
 1289 #ifndef INVARIANTS
 1290 #undef  _lockmgr_assert
 1291 #endif
 1292 
 1293 void
 1294 _lockmgr_assert(struct lock *lk, int what, const char *file, int line)
 1295 {
 1296         int slocked = 0;
 1297 
 1298         if (panicstr != NULL)
 1299                 return;
 1300         switch (what) {
 1301         case KA_SLOCKED:
 1302         case KA_SLOCKED | KA_NOTRECURSED:
 1303         case KA_SLOCKED | KA_RECURSED:
 1304                 slocked = 1;
 1305         case KA_LOCKED:
 1306         case KA_LOCKED | KA_NOTRECURSED:
 1307         case KA_LOCKED | KA_RECURSED:
 1308 #ifdef WITNESS
 1309 
 1310                 /*
 1311                  * We cannot trust WITNESS if the lock is held in exclusive
 1312                  * mode and a call to lockmgr_disown() happened.
 1313                  * Work around this by skipping the check if the lock is
 1314                  * held in exclusive mode, even for the KA_LOCKED case.
 1315                  */
 1316                 if (slocked || (lk->lk_lock & LK_SHARE)) {
 1317                         witness_assert(&lk->lock_object, what, file, line);
 1318                         break;
 1319                 }
 1320 #endif
 1321                 if (lk->lk_lock == LK_UNLOCKED ||
 1322                     ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
 1323                     (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
 1324                         panic("Lock %s not %slocked @ %s:%d\n",
 1325                             lk->lock_object.lo_name, slocked ? "share" : "",
 1326                             file, line);
 1327 
 1328                 if ((lk->lk_lock & LK_SHARE) == 0) {
 1329                         if (lockmgr_recursed(lk)) {
 1330                                 if (what & KA_NOTRECURSED)
 1331                                         panic("Lock %s recursed @ %s:%d\n",
 1332                                             lk->lock_object.lo_name, file,
 1333                                             line);
 1334                         } else if (what & KA_RECURSED)
 1335                                 panic("Lock %s not recursed @ %s:%d\n",
 1336                                     lk->lock_object.lo_name, file, line);
 1337                 }
 1338                 break;
 1339         case KA_XLOCKED:
 1340         case KA_XLOCKED | KA_NOTRECURSED:
 1341         case KA_XLOCKED | KA_RECURSED:
 1342                 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
 1343                         panic("Lock %s not exclusively locked @ %s:%d\n",
 1344                             lk->lock_object.lo_name, file, line);
 1345                 if (lockmgr_recursed(lk)) {
 1346                         if (what & KA_NOTRECURSED)
 1347                                 panic("Lock %s recursed @ %s:%d\n",
 1348                                     lk->lock_object.lo_name, file, line);
 1349                 } else if (what & KA_RECURSED)
 1350                         panic("Lock %s not recursed @ %s:%d\n",
 1351                             lk->lock_object.lo_name, file, line);
 1352                 break;
 1353         case KA_UNLOCKED:
 1354                 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
 1355                         panic("Lock %s exclusively locked @ %s:%d\n",
 1356                             lk->lock_object.lo_name, file, line);
 1357                 break;
 1358         default:
 1359                 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
 1360                     line);
 1361         }
 1362 }
 1363 #endif
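
/*
 * Minimal sketch (hypothetical caller) of the assertion interface above,
 * assuming a kernel built with INVARIANTS; consumers normally reach it
 * through wrapper macros that compile away otherwise.
 */
static void
example_assert_owned(struct lock *lk)
{

        /*
         * Panics unless lk is held exclusively (by curthread or disowned
         * to LK_KERNPROC) and is not recursed.
         */
        _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, __FILE__, __LINE__);
}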
 1364 
 1365 #ifdef DDB
 1366 int
 1367 lockmgr_chain(struct thread *td, struct thread **ownerp)
 1368 {
 1369         struct lock *lk;
 1370 
 1371         lk = td->td_wchan;
 1372 
 1373         if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
 1374                 return (0);
 1375         db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
 1376         if (lk->lk_lock & LK_SHARE)
 1377                 db_printf("SHARED (count %ju)\n",
 1378                     (uintmax_t)LK_SHARERS(lk->lk_lock));
 1379         else
 1380                 db_printf("EXCL\n");
 1381         *ownerp = lockmgr_xholder(lk);
 1382 
 1383         return (1);
 1384 }
 1385 
 1386 static void
 1387 db_show_lockmgr(struct lock_object *lock)
 1388 {
 1389         struct thread *td;
 1390         struct lock *lk;
 1391 
 1392         lk = (struct lock *)lock;
 1393 
 1394         db_printf(" state: ");
 1395         if (lk->lk_lock == LK_UNLOCKED)
 1396                 db_printf("UNLOCKED\n");
 1397         else if (lk->lk_lock & LK_SHARE)
 1398                 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
 1399         else {
 1400                 td = lockmgr_xholder(lk);
 1401                 if (td == (struct thread *)LK_KERNPROC)
 1402                         db_printf("XLOCK: LK_KERNPROC\n");
 1403                 else
 1404                         db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
 1405                             td->td_tid, td->td_proc->p_pid,
 1406                             td->td_proc->p_comm);
 1407                 if (lockmgr_recursed(lk))
 1408                         db_printf(" recursed: %d\n", lk->lk_recurse);
 1409         }
 1410         db_printf(" waiters: ");
 1411         switch (lk->lk_lock & LK_ALL_WAITERS) {
 1412         case LK_SHARED_WAITERS:
 1413                 db_printf("shared\n");
 1414                 break;
 1415         case LK_EXCLUSIVE_WAITERS:
 1416                 db_printf("exclusive\n");
 1417                 break;
 1418         case LK_ALL_WAITERS:
 1419                 db_printf("shared and exclusive\n");
 1420                 break;
 1421         default:
 1422                 db_printf("none\n");
 1423         }
 1424         db_printf(" spinners: ");
 1425         if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
 1426                 db_printf("exclusive\n");
 1427         else
 1428                 db_printf("none\n");
 1429 }
 1430 #endif
