FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lock.c

    1 /*-
    2  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice(s), this list of conditions and the following disclaimer as
   10  *    the first lines of this file unmodified other than the possible
   11  *    addition of one or more copyright notices.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice(s), this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
   17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
   20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
   23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
   26  * DAMAGE.
   27  */
   28 
   29 #include "opt_adaptive_lockmgrs.h"
   30 #include "opt_ddb.h"
   31 #include "opt_hwpmc_hooks.h"
   32 #include "opt_kdtrace.h"
   33 
   34 #include <sys/cdefs.h>
   35 __FBSDID("$FreeBSD: releng/9.2/sys/kern/kern_lock.c 245351 2013-01-13 00:32:07Z mjg $");
   36 
   37 #include <sys/param.h>
   38 #include <sys/ktr.h>
   39 #include <sys/lock.h>
   40 #include <sys/lock_profile.h>
   41 #include <sys/lockmgr.h>
   42 #include <sys/mutex.h>
   43 #include <sys/proc.h>
   44 #include <sys/sleepqueue.h>
   45 #ifdef DEBUG_LOCKS
   46 #include <sys/stack.h>
   47 #endif
   48 #include <sys/sysctl.h>
   49 #include <sys/systm.h>
   50 
   51 #include <machine/cpu.h>
   52 
   53 #ifdef DDB
   54 #include <ddb/ddb.h>
   55 #endif
   56 
   57 #ifdef HWPMC_HOOKS
   58 #include <sys/pmckern.h>
   59 PMC_SOFT_DECLARE( , , lock, failed);
   60 #endif
   61 
   62 CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
   63     (LK_ADAPTIVE | LK_NOSHARE));
   64 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
   65     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
   66 
   67 #define SQ_EXCLUSIVE_QUEUE      0
   68 #define SQ_SHARED_QUEUE         1
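/*
 * lockmgr sleeps its waiters on two separate queues of the lock's
 * sleepqueue: blocked exclusive requests use SQ_EXCLUSIVE_QUEUE and blocked
 * shared requests use SQ_SHARED_QUEUE, so wakeups can target either class
 * of waiters independently.
 */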
   69 
   70 #ifdef ADAPTIVE_LOCKMGRS
   71 #define ALK_RETRIES             10
   72 #define ALK_LOOPS               10000
   73 #endif
   74 
   75 #ifndef INVARIANTS
   76 #define _lockmgr_assert(lk, what, file, line)
   77 #define TD_LOCKS_INC(td)
   78 #define TD_LOCKS_DEC(td)
   79 #else
   80 #define TD_LOCKS_INC(td)        ((td)->td_locks++)
   81 #define TD_LOCKS_DEC(td)        ((td)->td_locks--)
   82 #endif
   83 #define TD_SLOCKS_INC(td)       ((td)->td_lk_slocks++)
   84 #define TD_SLOCKS_DEC(td)       ((td)->td_lk_slocks--)
   85 
   86 #ifndef DEBUG_LOCKS
   87 #define STACK_PRINT(lk)
   88 #define STACK_SAVE(lk)
   89 #define STACK_ZERO(lk)
   90 #else
   91 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
   92 #define STACK_SAVE(lk)  stack_save(&(lk)->lk_stack)
   93 #define STACK_ZERO(lk)  stack_zero(&(lk)->lk_stack)
   94 #endif
   95 
   96 #define LOCK_LOG2(lk, string, arg1, arg2)                               \
   97         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
   98                 CTR2(KTR_LOCK, (string), (arg1), (arg2))
   99 #define LOCK_LOG3(lk, string, arg1, arg2, arg3)                         \
  100         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
  101                 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
  102 
  103 #define GIANT_DECLARE                                                   \
  104         int _i = 0;                                                     \
  105         WITNESS_SAVE_DECL(Giant)
  106 #define GIANT_RESTORE() do {                                            \
  107         if (_i > 0) {                                                   \
  108                 while (_i--)                                            \
  109                         mtx_lock(&Giant);                               \
  110                 WITNESS_RESTORE(&Giant.lock_object, Giant);             \
  111         }                                                               \
  112 } while (0)
  113 #define GIANT_SAVE() do {                                               \
  114         if (mtx_owned(&Giant)) {                                        \
  115                 WITNESS_SAVE(&Giant.lock_object, Giant);                \
  116                 while (mtx_owned(&Giant)) {                             \
  117                         _i++;                                           \
  118                         mtx_unlock(&Giant);                             \
  119                 }                                                       \
  120         }                                                               \
  121 } while (0)
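/*
 * Giant may be held (possibly recursively) when a lockmgr operation has to
 * sleep or spin.  GIANT_SAVE() records the recursion depth in the _i
 * variable set up by GIANT_DECLARE, saves the WITNESS state and drops Giant
 * completely; GIANT_RESTORE() later reacquires Giant the same number of
 * times and restores the saved WITNESS state.
 */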
  122 
  123 #define LK_CAN_SHARE(x)                                                 \
  124         (((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||      \
  125         ((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||                           \
  126         curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
  127 #define LK_TRYOP(x)                                                     \
  128         ((x) & LK_NOWAIT)
  129 
  130 #define LK_CAN_WITNESS(x)                                               \
  131         (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
  132 #define LK_TRYWIT(x)                                                    \
  133         (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
  134 
  135 #define LK_CAN_ADAPT(lk, f)                                             \
  136         (((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&             \
  137         ((f) & LK_SLEEPFAIL) == 0)
  138 
  139 #define lockmgr_disowned(lk)                                            \
  140         (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
  141 
  142 #define lockmgr_xlocked(lk)                                             \
  143         (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
  144 
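/*
 * Both predicates above decode the single lk_lock word: with the flag bits
 * (other than LK_SHARE) masked away, an exclusively held lock stores the
 * owning thread pointer, or LK_KERNPROC once the lock has been disowned,
 * while a share-locked lock keeps LK_SHARE set and counts its holders via
 * LK_SHARERS() instead.
 */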
  145 static void      assert_lockmgr(struct lock_object *lock, int how);
  146 #ifdef DDB
  147 static void      db_show_lockmgr(struct lock_object *lock);
  148 #endif
  149 static void      lock_lockmgr(struct lock_object *lock, int how);
  150 #ifdef KDTRACE_HOOKS
  151 static int       owner_lockmgr(struct lock_object *lock, struct thread **owner);
  152 #endif
  153 static int       unlock_lockmgr(struct lock_object *lock);
  154 
  155 struct lock_class lock_class_lockmgr = {
  156         .lc_name = "lockmgr",
  157         .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
  158         .lc_assert = assert_lockmgr,
  159 #ifdef DDB
  160         .lc_ddb_show = db_show_lockmgr,
  161 #endif
  162         .lc_lock = lock_lockmgr,
  163         .lc_unlock = unlock_lockmgr,
  164 #ifdef KDTRACE_HOOKS
  165         .lc_owner = owner_lockmgr,
  166 #endif
  167 };
  168 
  169 static __inline struct thread *
  170 lockmgr_xholder(struct lock *lk)
  171 {
  172         uintptr_t x;
  173 
  174         x = lk->lk_lock;
  175         return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
  176 }
  177 
  178 /*
   179  * It assumes the sleepqueue chain lock is held and returns with it released.
   180  * It also assumes the generic interlock is sane and has already been checked.
   181  * If LK_INTERLOCK is specified, the interlock is not reacquired after the
  182  * sleep.
  183  */
  184 static __inline int
  185 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
  186     const char *wmesg, int pri, int timo, int queue)
  187 {
  188         GIANT_DECLARE;
  189         struct lock_class *class;
  190         int catch, error;
  191 
  192         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
  193         catch = pri & PCATCH;
  194         pri &= PRIMASK;
  195         error = 0;
  196 
  197         LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
  198             (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
  199 
  200         if (flags & LK_INTERLOCK)
  201                 class->lc_unlock(ilk);
  202         if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
  203                 lk->lk_exslpfail++;
  204         GIANT_SAVE();
  205         sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
  206             SLEEPQ_INTERRUPTIBLE : 0), queue);
  207         if ((flags & LK_TIMELOCK) && timo)
  208                 sleepq_set_timeout(&lk->lock_object, timo);
  209 
  210         /*
   211          * Decide which form of sleep is appropriate.
  212          */
  213         if ((flags & LK_TIMELOCK) && timo && catch)
  214                 error = sleepq_timedwait_sig(&lk->lock_object, pri);
  215         else if ((flags & LK_TIMELOCK) && timo)
  216                 error = sleepq_timedwait(&lk->lock_object, pri);
  217         else if (catch)
  218                 error = sleepq_wait_sig(&lk->lock_object, pri);
  219         else
  220                 sleepq_wait(&lk->lock_object, pri);
  221         GIANT_RESTORE();
  222         if ((flags & LK_SLEEPFAIL) && error == 0)
  223                 error = ENOLCK;
  224 
  225         return (error);
  226 }
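/*
 * The error returned by sleeplk() is whatever the sleepqueue reports
 * (typically EINTR or ERESTART for interrupted sleeps and EWOULDBLOCK for
 * expired timeouts), except that a successful sleep under LK_SLEEPFAIL is
 * converted to ENOLCK so that the caller abandons the acquisition as
 * requested.
 */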
  227 
  228 static __inline int
  229 wakeupshlk(struct lock *lk, const char *file, int line)
  230 {
  231         uintptr_t v, x;
  232         u_int realexslp;
  233         int queue, wakeup_swapper;
  234 
  235         TD_LOCKS_DEC(curthread);
  236         TD_SLOCKS_DEC(curthread);
  237         WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
  238         LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
  239 
  240         wakeup_swapper = 0;
  241         for (;;) {
  242                 x = lk->lk_lock;
  243 
  244                 /*
  245                  * If there is more than one shared lock held, just drop one
  246                  * and return.
  247                  */
  248                 if (LK_SHARERS(x) > 1) {
  249                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
  250                             x - LK_ONE_SHARER))
  251                                 break;
  252                         continue;
  253                 }
  254 
  255                 /*
   256                  * If there are no waiters on the exclusive queue, drop the
  257                  * lock quickly.
  258                  */
  259                 if ((x & LK_ALL_WAITERS) == 0) {
  260                         MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
  261                             LK_SHARERS_LOCK(1));
  262                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
  263                                 break;
  264                         continue;
  265                 }
  266 
  267                 /*
  268                  * We should have a sharer with waiters, so enter the hard
  269                  * path in order to handle wakeups correctly.
  270                  */
  271                 sleepq_lock(&lk->lock_object);
  272                 x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
  273                 v = LK_UNLOCKED;
  274 
  275                 /*
  276                  * If the lock has exclusive waiters, give them preference in
  277                  * order to avoid deadlock with shared runners up.
  278                  * If interruptible sleeps left the exclusive queue empty
   279                  * avoid starvation of the threads sleeping on the shared
  280                  * queue by giving them precedence and cleaning up the
  281                  * exclusive waiters bit anyway.
   282                  * Please note that the lk_exslpfail count may overstate
   283                  * the real number of waiters with the LK_SLEEPFAIL flag on,
   284                  * because they may be used in conjunction with interruptible
   285                  * sleeps, so lk_exslpfail should be considered an upper
  286                  * bound, including the edge cases.
  287                  */
  288                 realexslp = sleepq_sleepcnt(&lk->lock_object,
  289                     SQ_EXCLUSIVE_QUEUE);
  290                 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
  291                         if (lk->lk_exslpfail < realexslp) {
  292                                 lk->lk_exslpfail = 0;
  293                                 queue = SQ_EXCLUSIVE_QUEUE;
  294                                 v |= (x & LK_SHARED_WAITERS);
  295                         } else {
  296                                 lk->lk_exslpfail = 0;
  297                                 LOCK_LOG2(lk,
  298                                     "%s: %p has only LK_SLEEPFAIL sleepers",
  299                                     __func__, lk);
  300                                 LOCK_LOG2(lk,
  301                             "%s: %p waking up threads on the exclusive queue",
  302                                     __func__, lk);
  303                                 wakeup_swapper =
  304                                     sleepq_broadcast(&lk->lock_object,
  305                                     SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
  306                                 queue = SQ_SHARED_QUEUE;
  307                         }
  308                                 
  309                 } else {
  310 
  311                         /*
  312                          * Exclusive waiters sleeping with LK_SLEEPFAIL on
  313                          * and using interruptible sleeps/timeout may have
   314                          * left spurious lk_exslpfail counts, so clean
   315                          * them up anyway.
  316                          */
  317                         lk->lk_exslpfail = 0;
  318                         queue = SQ_SHARED_QUEUE;
  319                 }
  320 
  321                 if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
  322                     v)) {
  323                         sleepq_release(&lk->lock_object);
  324                         continue;
  325                 }
  326                 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
  327                     __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
  328                     "exclusive");
  329                 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
  330                     0, queue);
  331                 sleepq_release(&lk->lock_object);
  332                 break;
  333         }
  334 
  335         lock_profile_release_lock(&lk->lock_object);
  336         return (wakeup_swapper);
  337 }
  338 
  339 static void
  340 assert_lockmgr(struct lock_object *lock, int what)
  341 {
  342 
  343         panic("lockmgr locks do not support assertions");
  344 }
  345 
  346 static void
  347 lock_lockmgr(struct lock_object *lock, int how)
  348 {
  349 
  350         panic("lockmgr locks do not support sleep interlocking");
  351 }
  352 
  353 static int
  354 unlock_lockmgr(struct lock_object *lock)
  355 {
  356 
  357         panic("lockmgr locks do not support sleep interlocking");
  358 }
  359 
  360 #ifdef KDTRACE_HOOKS
  361 static int
  362 owner_lockmgr(struct lock_object *lock, struct thread **owner)
  363 {
  364 
  365         panic("lockmgr locks do not support owner inquiring");
  366 }
  367 #endif
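/*
 * The lock class callbacks above are stubs: lockmgr locks cannot be used as
 * a sleep interlock and do not support class-level assertions or owner
 * queries, so they panic if ever reached.  The class is registered mainly
 * so that lockmgr locks carry a name, flags and a DDB show routine like the
 * other lock types.
 */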
  368 
  369 void
  370 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
  371 {
  372         int iflags;
  373 
  374         MPASS((flags & ~LK_INIT_MASK) == 0);
  375         ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
  376             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
  377             &lk->lk_lock));
  378 
  379         iflags = LO_SLEEPABLE | LO_UPGRADABLE;
  380         if (flags & LK_CANRECURSE)
  381                 iflags |= LO_RECURSABLE;
  382         if ((flags & LK_NODUP) == 0)
  383                 iflags |= LO_DUPOK;
  384         if (flags & LK_NOPROFILE)
  385                 iflags |= LO_NOPROFILE;
  386         if ((flags & LK_NOWITNESS) == 0)
  387                 iflags |= LO_WITNESS;
  388         if (flags & LK_QUIET)
  389                 iflags |= LO_QUIET;
  390         iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
  391 
  392         lk->lk_lock = LK_UNLOCKED;
  393         lk->lk_recurse = 0;
  394         lk->lk_exslpfail = 0;
  395         lk->lk_timo = timo;
  396         lk->lk_pri = pri;
  397         lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
  398         STACK_ZERO(lk);
  399 }
  400 
  401 /*
  402  * XXX: Gross hacks to manipulate external lock flags after
  403  * initialization.  Used for certain vnode and buf locks.
  404  */
  405 void
  406 lockallowshare(struct lock *lk)
  407 {
  408 
  409         lockmgr_assert(lk, KA_XLOCKED);
  410         lk->lock_object.lo_flags &= ~LK_NOSHARE;
  411 }
  412 
  413 void
  414 lockallowrecurse(struct lock *lk)
  415 {
  416 
  417         lockmgr_assert(lk, KA_XLOCKED);
  418         lk->lock_object.lo_flags |= LO_RECURSABLE;
  419 }
  420 
  421 void
  422 lockdisablerecurse(struct lock *lk)
  423 {
  424 
  425         lockmgr_assert(lk, KA_XLOCKED);
  426         lk->lock_object.lo_flags &= ~LO_RECURSABLE;
  427 }
  428 
  429 void
  430 lockdestroy(struct lock *lk)
  431 {
  432 
  433         KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
  434         KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
  435         KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
  436         lock_destroy(&lk->lock_object);
  437 }
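/*
 * For illustration only, a minimal consumer of this API might look like the
 * sketch below; the "ex_lock" name and the PVFS priority are arbitrary
 * choices rather than anything taken from this file:
 *
 *      struct lock ex_lock;
 *
 *      lockinit(&ex_lock, PVFS, "exlock", 0, LK_CANRECURSE);
 *      lockmgr(&ex_lock, LK_EXCLUSIVE, NULL);
 *      ... exclusive critical section ...
 *      lockmgr(&ex_lock, LK_RELEASE, NULL);
 *      lockdestroy(&ex_lock);
 */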
  438 
  439 int
  440 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
  441     const char *wmesg, int pri, int timo, const char *file, int line)
  442 {
  443         GIANT_DECLARE;
  444         struct lock_class *class;
  445         const char *iwmesg;
  446         uintptr_t tid, v, x;
  447         u_int op, realexslp;
  448         int error, ipri, itimo, queue, wakeup_swapper;
  449 #ifdef LOCK_PROFILING
  450         uint64_t waittime = 0;
  451         int contested = 0;
  452 #endif
  453 #ifdef ADAPTIVE_LOCKMGRS
  454         volatile struct thread *owner;
  455         u_int i, spintries = 0;
  456 #endif
  457 
  458         error = 0;
  459         tid = (uintptr_t)curthread;
  460         op = (flags & LK_TYPE_MASK);
  461         iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
  462         ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
  463         itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
  464 
  465         MPASS((flags & ~LK_TOTAL_MASK) == 0);
  466         KASSERT((op & (op - 1)) == 0,
  467             ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
  468         KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
  469             (op != LK_DOWNGRADE && op != LK_RELEASE),
  470             ("%s: Invalid flags in regard of the operation desired @ %s:%d",
  471             __func__, file, line));
  472         KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
  473             ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
  474             __func__, file, line));
  475 
  476         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
  477         if (panicstr != NULL) {
  478                 if (flags & LK_INTERLOCK)
  479                         class->lc_unlock(ilk);
  480                 return (0);
  481         }
  482 
  483         if (lk->lock_object.lo_flags & LK_NOSHARE) {
  484                 switch (op) {
  485                 case LK_SHARED:
  486                         op = LK_EXCLUSIVE;
  487                         break;
  488                 case LK_UPGRADE:
  489                 case LK_DOWNGRADE:
  490                         _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
  491                             file, line);
  492                         if (flags & LK_INTERLOCK)
  493                                 class->lc_unlock(ilk);
  494                         return (0);
  495                 }
  496         }
  497 
  498         wakeup_swapper = 0;
  499         switch (op) {
  500         case LK_SHARED:
  501                 if (LK_CAN_WITNESS(flags))
  502                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
  503                             file, line, ilk);
  504                 for (;;) {
  505                         x = lk->lk_lock;
  506 
  507                         /*
  508                          * If no other thread has an exclusive lock, or
  509                          * no exclusive waiter is present, bump the count of
  510                          * sharers.  Since we have to preserve the state of
  511                          * waiters, if we fail to acquire the shared lock
  512                          * loop back and retry.
  513                          */
  514                         if (LK_CAN_SHARE(x)) {
  515                                 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
  516                                     x + LK_ONE_SHARER))
  517                                         break;
  518                                 continue;
  519                         }
  520 #ifdef HWPMC_HOOKS
  521                         PMC_SOFT_CALL( , , lock, failed);
  522 #endif
  523                         lock_profile_obtain_lock_failed(&lk->lock_object,
  524                             &contested, &waittime);
  525 
  526                         /*
  527                          * If the lock is already held by curthread in
   528                          * exclusive mode, avoid a deadlock.
  529                          */
  530                         if (LK_HOLDER(x) == tid) {
  531                                 LOCK_LOG2(lk,
  532                                     "%s: %p already held in exclusive mode",
  533                                     __func__, lk);
  534                                 error = EDEADLK;
  535                                 break;
  536                         }
  537 
  538                         /*
   539                          * If the operation is expected not to sleep, just give up
  540                          * and return.
  541                          */
  542                         if (LK_TRYOP(flags)) {
  543                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
  544                                     __func__, lk);
  545                                 error = EBUSY;
  546                                 break;
  547                         }
  548 
  549 #ifdef ADAPTIVE_LOCKMGRS
  550                         /*
  551                          * If the owner is running on another CPU, spin until
  552                          * the owner stops running or the state of the lock
   553                          * changes.  We need to handle two possible states here
  554                          * because for a failed acquisition the lock can be
  555                          * either held in exclusive mode or shared mode
  556                          * (for the writer starvation avoidance technique).
  557                          */
  558                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  559                             LK_HOLDER(x) != LK_KERNPROC) {
  560                                 owner = (struct thread *)LK_HOLDER(x);
  561                                 if (LOCK_LOG_TEST(&lk->lock_object, 0))
  562                                         CTR3(KTR_LOCK,
  563                                             "%s: spinning on %p held by %p",
  564                                             __func__, lk, owner);
  565 
  566                                 /*
   567                                  * If we are also holding an interlock, drop it
  568                                  * in order to avoid a deadlock if the lockmgr
  569                                  * owner is adaptively spinning on the
  570                                  * interlock itself.
  571                                  */
  572                                 if (flags & LK_INTERLOCK) {
  573                                         class->lc_unlock(ilk);
  574                                         flags &= ~LK_INTERLOCK;
  575                                 }
  576                                 GIANT_SAVE();
  577                                 while (LK_HOLDER(lk->lk_lock) ==
  578                                     (uintptr_t)owner && TD_IS_RUNNING(owner))
  579                                         cpu_spinwait();
  580                                 GIANT_RESTORE();
  581                                 continue;
  582                         } else if (LK_CAN_ADAPT(lk, flags) &&
  583                             (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
  584                             spintries < ALK_RETRIES) {
  585                                 if (flags & LK_INTERLOCK) {
  586                                         class->lc_unlock(ilk);
  587                                         flags &= ~LK_INTERLOCK;
  588                                 }
  589                                 GIANT_SAVE();
  590                                 spintries++;
  591                                 for (i = 0; i < ALK_LOOPS; i++) {
  592                                         if (LOCK_LOG_TEST(&lk->lock_object, 0))
  593                                                 CTR4(KTR_LOCK,
  594                                     "%s: shared spinning on %p with %u and %u",
  595                                                     __func__, lk, spintries, i);
  596                                         x = lk->lk_lock;
  597                                         if ((x & LK_SHARE) == 0 ||
  598                                             LK_CAN_SHARE(x) != 0)
  599                                                 break;
  600                                         cpu_spinwait();
  601                                 }
  602                                 GIANT_RESTORE();
  603                                 if (i != ALK_LOOPS)
  604                                         continue;
  605                         }
  606 #endif
  607 
  608                         /*
  609                          * Acquire the sleepqueue chain lock because we
   610                          * probably will need to manipulate the waiters flags.
  611                          */
  612                         sleepq_lock(&lk->lock_object);
  613                         x = lk->lk_lock;
  614 
  615                         /*
   616                          * If the lock can be acquired in shared mode, try
  617                          * again.
  618                          */
  619                         if (LK_CAN_SHARE(x)) {
  620                                 sleepq_release(&lk->lock_object);
  621                                 continue;
  622                         }
  623 
  624 #ifdef ADAPTIVE_LOCKMGRS
  625                         /*
  626                          * The current lock owner might have started executing
  627                          * on another CPU (or the lock could have changed
  628                          * owner) while we were waiting on the turnstile
  629                          * chain lock.  If so, drop the turnstile lock and try
  630                          * again.
  631                          */
  632                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  633                             LK_HOLDER(x) != LK_KERNPROC) {
  634                                 owner = (struct thread *)LK_HOLDER(x);
  635                                 if (TD_IS_RUNNING(owner)) {
  636                                         sleepq_release(&lk->lock_object);
  637                                         continue;
  638                                 }
  639                         }
  640 #endif
  641 
  642                         /*
  643                          * Try to set the LK_SHARED_WAITERS flag.  If we fail,
  644                          * loop back and retry.
  645                          */
  646                         if ((x & LK_SHARED_WAITERS) == 0) {
  647                                 if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
  648                                     x | LK_SHARED_WAITERS)) {
  649                                         sleepq_release(&lk->lock_object);
  650                                         continue;
  651                                 }
  652                                 LOCK_LOG2(lk, "%s: %p set shared waiters flag",
  653                                     __func__, lk);
  654                         }
  655 
  656                         /*
   657                          * Since we have been unable to acquire the
  658                          * shared lock and the shared waiters flag is set,
  659                          * we will sleep.
  660                          */
  661                         error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
  662                             SQ_SHARED_QUEUE);
  663                         flags &= ~LK_INTERLOCK;
  664                         if (error) {
  665                                 LOCK_LOG3(lk,
  666                                     "%s: interrupted sleep for %p with %d",
  667                                     __func__, lk, error);
  668                                 break;
  669                         }
  670                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
  671                             __func__, lk);
  672                 }
  673                 if (error == 0) {
  674                         lock_profile_obtain_lock_success(&lk->lock_object,
  675                             contested, waittime, file, line);
  676                         LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
  677                             line);
  678                         WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
  679                             line);
  680                         TD_LOCKS_INC(curthread);
  681                         TD_SLOCKS_INC(curthread);
  682                         STACK_SAVE(lk);
  683                 }
  684                 break;
  685         case LK_UPGRADE:
  686                 _lockmgr_assert(lk, KA_SLOCKED, file, line);
  687                 v = lk->lk_lock;
  688                 x = v & LK_ALL_WAITERS;
  689                 v &= LK_EXCLUSIVE_SPINNERS;
  690 
  691                 /*
  692                  * Try to switch from one shared lock to an exclusive one.
  693                  * We need to preserve waiters flags during the operation.
  694                  */
  695                 if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
  696                     tid | x)) {
  697                         LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
  698                             line);
  699                         WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
  700                             LK_TRYWIT(flags), file, line);
  701                         TD_SLOCKS_DEC(curthread);
  702                         break;
  703                 }
  704 
  705                 /*
  706                  * We have been unable to succeed in upgrading, so just
  707                  * give up the shared lock.
  708                  */
  709                 wakeup_swapper |= wakeupshlk(lk, file, line);
  710 
  711                 /* FALLTHROUGH */
  712         case LK_EXCLUSIVE:
  713                 if (LK_CAN_WITNESS(flags))
  714                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
  715                             LOP_EXCLUSIVE, file, line, ilk);
  716 
  717                 /*
  718                  * If curthread already holds the lock and this one is
  719                  * allowed to recurse, simply recurse on it.
  720                  */
  721                 if (lockmgr_xlocked(lk)) {
  722                         if ((flags & LK_CANRECURSE) == 0 &&
  723                             (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
  724 
  725                                 /*
   726                                  * If the operation is a try, avoid panicking: just
  727                                  * give up and return.
  728                                  */
  729                                 if (LK_TRYOP(flags)) {
  730                                         LOCK_LOG2(lk,
  731                                             "%s: %p fails the try operation",
  732                                             __func__, lk);
  733                                         error = EBUSY;
  734                                         break;
  735                                 }
  736                                 if (flags & LK_INTERLOCK)
  737                                         class->lc_unlock(ilk);
  738                 panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
  739                                     __func__, iwmesg, file, line);
  740                         }
  741                         lk->lk_recurse++;
  742                         LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
  743                         LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
  744                             lk->lk_recurse, file, line);
  745                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
  746                             LK_TRYWIT(flags), file, line);
  747                         TD_LOCKS_INC(curthread);
  748                         break;
  749                 }
  750 
  751                 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
  752                     tid)) {
  753 #ifdef HWPMC_HOOKS
  754                         PMC_SOFT_CALL( , , lock, failed);
  755 #endif
  756                         lock_profile_obtain_lock_failed(&lk->lock_object,
  757                             &contested, &waittime);
  758 
  759                         /*
   760                          * If the operation is expected not to sleep, just give up
  761                          * and return.
  762                          */
  763                         if (LK_TRYOP(flags)) {
  764                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
  765                                     __func__, lk);
  766                                 error = EBUSY;
  767                                 break;
  768                         }
  769 
  770 #ifdef ADAPTIVE_LOCKMGRS
  771                         /*
  772                          * If the owner is running on another CPU, spin until
  773                          * the owner stops running or the state of the lock
  774                          * changes.
  775                          */
  776                         x = lk->lk_lock;
  777                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  778                             LK_HOLDER(x) != LK_KERNPROC) {
  779                                 owner = (struct thread *)LK_HOLDER(x);
  780                                 if (LOCK_LOG_TEST(&lk->lock_object, 0))
  781                                         CTR3(KTR_LOCK,
  782                                             "%s: spinning on %p held by %p",
  783                                             __func__, lk, owner);
  784 
  785                                 /*
   786                                  * If we are also holding an interlock, drop it
  787                                  * in order to avoid a deadlock if the lockmgr
  788                                  * owner is adaptively spinning on the
  789                                  * interlock itself.
  790                                  */
  791                                 if (flags & LK_INTERLOCK) {
  792                                         class->lc_unlock(ilk);
  793                                         flags &= ~LK_INTERLOCK;
  794                                 }
  795                                 GIANT_SAVE();
  796                                 while (LK_HOLDER(lk->lk_lock) ==
  797                                     (uintptr_t)owner && TD_IS_RUNNING(owner))
  798                                         cpu_spinwait();
  799                                 GIANT_RESTORE();
  800                                 continue;
  801                         } else if (LK_CAN_ADAPT(lk, flags) &&
  802                             (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
  803                             spintries < ALK_RETRIES) {
  804                                 if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
  805                                     !atomic_cmpset_ptr(&lk->lk_lock, x,
  806                                     x | LK_EXCLUSIVE_SPINNERS))
  807                                         continue;
  808                                 if (flags & LK_INTERLOCK) {
  809                                         class->lc_unlock(ilk);
  810                                         flags &= ~LK_INTERLOCK;
  811                                 }
  812                                 GIANT_SAVE();
  813                                 spintries++;
  814                                 for (i = 0; i < ALK_LOOPS; i++) {
  815                                         if (LOCK_LOG_TEST(&lk->lock_object, 0))
  816                                                 CTR4(KTR_LOCK,
  817                                     "%s: shared spinning on %p with %u and %u",
  818                                                     __func__, lk, spintries, i);
  819                                         if ((lk->lk_lock &
  820                                             LK_EXCLUSIVE_SPINNERS) == 0)
  821                                                 break;
  822                                         cpu_spinwait();
  823                                 }
  824                                 GIANT_RESTORE();
  825                                 if (i != ALK_LOOPS)
  826                                         continue;
  827                         }
  828 #endif
  829 
  830                         /*
  831                          * Acquire the sleepqueue chain lock because we
   832                          * probably will need to manipulate the waiters flags.
  833                          */
  834                         sleepq_lock(&lk->lock_object);
  835                         x = lk->lk_lock;
  836 
  837                         /*
   838                          * If the lock has been released while we spun on
  839                          * the sleepqueue chain lock just try again.
  840                          */
  841                         if (x == LK_UNLOCKED) {
  842                                 sleepq_release(&lk->lock_object);
  843                                 continue;
  844                         }
  845 
  846 #ifdef ADAPTIVE_LOCKMGRS
  847                         /*
  848                          * The current lock owner might have started executing
  849                          * on another CPU (or the lock could have changed
  850                          * owner) while we were waiting on the turnstile
  851                          * chain lock.  If so, drop the turnstile lock and try
  852                          * again.
  853                          */
  854                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  855                             LK_HOLDER(x) != LK_KERNPROC) {
  856                                 owner = (struct thread *)LK_HOLDER(x);
  857                                 if (TD_IS_RUNNING(owner)) {
  858                                         sleepq_release(&lk->lock_object);
  859                                         continue;
  860                                 }
  861                         }
  862 #endif
  863 
  864                         /*
  865                          * The lock can be in the state where there is a
  866                          * pending queue of waiters, but still no owner.
  867                          * This happens when the lock is contested and an
  868                          * owner is going to claim the lock.
  869                          * If curthread is the one successfully acquiring it
  870                          * claim lock ownership and return, preserving waiters
  871                          * flags.
  872                          */
  873                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
  874                         if ((x & ~v) == LK_UNLOCKED) {
  875                                 v &= ~LK_EXCLUSIVE_SPINNERS;
  876                                 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
  877                                     tid | v)) {
  878                                         sleepq_release(&lk->lock_object);
  879                                         LOCK_LOG2(lk,
  880                                             "%s: %p claimed by a new writer",
  881                                             __func__, lk);
  882                                         break;
  883                                 }
  884                                 sleepq_release(&lk->lock_object);
  885                                 continue;
  886                         }
  887 
  888                         /*
  889                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
  890                          * fail, loop back and retry.
  891                          */
  892                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
  893                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
  894                                     x | LK_EXCLUSIVE_WAITERS)) {
  895                                         sleepq_release(&lk->lock_object);
  896                                         continue;
  897                                 }
  898                                 LOCK_LOG2(lk, "%s: %p set excl waiters flag",
  899                                     __func__, lk);
  900                         }
  901 
  902                         /*
   903                          * Since we have been unable to acquire the
  904                          * exclusive lock and the exclusive waiters flag
  905                          * is set, we will sleep.
  906                          */
  907                         error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
  908                             SQ_EXCLUSIVE_QUEUE);
  909                         flags &= ~LK_INTERLOCK;
  910                         if (error) {
  911                                 LOCK_LOG3(lk,
  912                                     "%s: interrupted sleep for %p with %d",
  913                                     __func__, lk, error);
  914                                 break;
  915                         }
  916                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
  917                             __func__, lk);
  918                 }
  919                 if (error == 0) {
  920                         lock_profile_obtain_lock_success(&lk->lock_object,
  921                             contested, waittime, file, line);
  922                         LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
  923                             lk->lk_recurse, file, line);
  924                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
  925                             LK_TRYWIT(flags), file, line);
  926                         TD_LOCKS_INC(curthread);
  927                         STACK_SAVE(lk);
  928                 }
  929                 break;
  930         case LK_DOWNGRADE:
  931                 _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
  932                 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
  933                 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
  934                 TD_SLOCKS_INC(curthread);
  935 
  936                 /*
  937                  * In order to preserve waiters flags, just spin.
  938                  */
  939                 for (;;) {
  940                         x = lk->lk_lock;
  941                         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
  942                         x &= LK_ALL_WAITERS;
  943                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
  944                             LK_SHARERS_LOCK(1) | x))
  945                                 break;
  946                         cpu_spinwait();
  947                 }
  948                 break;
  949         case LK_RELEASE:
  950                 _lockmgr_assert(lk, KA_LOCKED, file, line);
  951                 x = lk->lk_lock;
  952 
  953                 if ((x & LK_SHARE) == 0) {
  954 
  955                         /*
   956                          * As a first option, treat the lock as if it has no
   957                          * waiters.
  958                          * Fix-up the tid var if the lock has been disowned.
  959                          */
  960                         if (LK_HOLDER(x) == LK_KERNPROC)
  961                                 tid = LK_KERNPROC;
  962                         else {
  963                                 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
  964                                     file, line);
  965                                 TD_LOCKS_DEC(curthread);
  966                         }
  967                         LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
  968                             lk->lk_recurse, file, line);
  969 
  970                         /*
  971                          * The lock is held in exclusive mode.
  972                          * If the lock is recursed also, then unrecurse it.
  973                          */
  974                         if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
  975                                 LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
  976                                     lk);
  977                                 lk->lk_recurse--;
  978                                 break;
  979                         }
  980                         if (tid != LK_KERNPROC)
  981                                 lock_profile_release_lock(&lk->lock_object);
  982 
  983                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
  984                             LK_UNLOCKED))
  985                                 break;
  986 
  987                         sleepq_lock(&lk->lock_object);
  988                         x = lk->lk_lock;
  989                         v = LK_UNLOCKED;
  990 
  991                         /*
  992                          * If the lock has exclusive waiters, give them
  993                          * preference in order to avoid deadlock with
  994                          * shared runners up.
  995                          * If interruptible sleeps left the exclusive queue
   996                          * empty, avoid starvation of the threads sleeping
  997                          * on the shared queue by giving them precedence
  998                          * and cleaning up the exclusive waiters bit anyway.
   999                          * Please note that the lk_exslpfail count may
  1000                          * overstate the real number of waiters with the
  1001                          * LK_SLEEPFAIL flag on, because they may be used in
  1002                          * conjunction with interruptible sleeps, so
  1003                          * lk_exslpfail should be considered an upper
 1004                          * bound, including the edge cases.
 1005                          */
 1006                         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
 1007                         realexslp = sleepq_sleepcnt(&lk->lock_object,
 1008                             SQ_EXCLUSIVE_QUEUE);
 1009                         if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
 1010                                 if (lk->lk_exslpfail < realexslp) {
 1011                                         lk->lk_exslpfail = 0;
 1012                                         queue = SQ_EXCLUSIVE_QUEUE;
 1013                                         v |= (x & LK_SHARED_WAITERS);
 1014                                 } else {
 1015                                         lk->lk_exslpfail = 0;
 1016                                         LOCK_LOG2(lk,
 1017                                         "%s: %p has only LK_SLEEPFAIL sleepers",
 1018                                             __func__, lk);
 1019                                         LOCK_LOG2(lk,
 1020                         "%s: %p waking up threads on the exclusive queue",
 1021                                             __func__, lk);
 1022                                         wakeup_swapper =
 1023                                             sleepq_broadcast(&lk->lock_object,
 1024                                             SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
 1025                                         queue = SQ_SHARED_QUEUE;
 1026                                 }
 1027                         } else {
 1028 
 1029                                 /*
 1030                                  * Exclusive waiters sleeping with LK_SLEEPFAIL
 1031                                  * on and using interruptible sleeps/timeout
 1032                                  * may have left spourious lk_exslpfail counts
 1033                                  * on, so clean it up anyway. 
 1034                                  */
 1035                                 lk->lk_exslpfail = 0;
 1036                                 queue = SQ_SHARED_QUEUE;
 1037                         }
 1038 
 1039                         LOCK_LOG3(lk,
 1040                             "%s: %p waking up threads on the %s queue",
 1041                             __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
 1042                             "exclusive");
 1043                         atomic_store_rel_ptr(&lk->lk_lock, v);
 1044                         wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
 1045                             SLEEPQ_LK, 0, queue);
 1046                         sleepq_release(&lk->lock_object);
 1047                         break;
 1048                 } else
 1049                         wakeup_swapper = wakeupshlk(lk, file, line);
 1050                 break;
 1051         case LK_DRAIN:
 1052                 if (LK_CAN_WITNESS(flags))
 1053                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
 1054                             LOP_EXCLUSIVE, file, line, ilk);
 1055 
 1056                 /*
 1057                  * Trying to drain a lock we already own will result in a
 1058                  * deadlock.
 1059                  */
 1060                 if (lockmgr_xlocked(lk)) {
 1061                         if (flags & LK_INTERLOCK)
 1062                                 class->lc_unlock(ilk);
 1063                         panic("%s: draining %s with the lock held @ %s:%d\n",
 1064                             __func__, iwmesg, file, line);
 1065                 }
 1066 
 1067                 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
 1068 #ifdef HWPMC_HOOKS
 1069                         PMC_SOFT_CALL( , , lock, failed);
 1070 #endif
 1071                         lock_profile_obtain_lock_failed(&lk->lock_object,
 1072                             &contested, &waittime);
 1073 
 1074                         /*
  1075                          * If the operation is expected not to sleep, just give up
 1076                          * and return.
 1077                          */
 1078                         if (LK_TRYOP(flags)) {
 1079                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
 1080                                     __func__, lk);
 1081                                 error = EBUSY;
 1082                                 break;
 1083                         }
 1084 
 1085                         /*
 1086                          * Acquire the sleepqueue chain lock because we
  1087                          * probably will need to manipulate the waiters flags.
 1088                          */
 1089                         sleepq_lock(&lk->lock_object);
 1090                         x = lk->lk_lock;
 1091 
 1092                         /*
  1093                          * If the lock has been released while we spun on
 1094                          * the sleepqueue chain lock just try again.
 1095                          */
 1096                         if (x == LK_UNLOCKED) {
 1097                                 sleepq_release(&lk->lock_object);
 1098                                 continue;
 1099                         }
 1100 
 1101                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
 1102                         if ((x & ~v) == LK_UNLOCKED) {
 1103                                 v = (x & ~LK_EXCLUSIVE_SPINNERS);
 1104 
 1105                                 /*
 1106                                  * If interruptible sleeps left the exclusive
  1107                                  * queue empty, avoid starvation of the
 1108                                  * threads sleeping on the shared queue by
 1109                                  * giving them precedence and cleaning up the
 1110                                  * exclusive waiters bit anyway.
  1111                                  * Please note that the lk_exslpfail count may
  1112                                  * overstate the real number of waiters with
  1113                                  * the LK_SLEEPFAIL flag on, because they may
  1114                                  * be used in conjunction with interruptible
  1115                                  * sleeps, so lk_exslpfail should be considered
  1116                                  * an upper bound, including the edge
  1117                                  * cases.
 1118                                  */
 1119                                 if (v & LK_EXCLUSIVE_WAITERS) {
 1120                                         queue = SQ_EXCLUSIVE_QUEUE;
 1121                                         v &= ~LK_EXCLUSIVE_WAITERS;
 1122                                 } else {
 1123 
 1124                                         /*
 1125                                          * Exclusive waiters sleeping with
 1126                                          * LK_SLEEPFAIL on and using
 1127                                          * interruptible sleeps/timeout may
  1128                                          * have left spurious lk_exslpfail
  1129                                          * counts on, so clean them up anyway.
 1130                                          */
 1131                                         MPASS(v & LK_SHARED_WAITERS);
 1132                                         lk->lk_exslpfail = 0;
 1133                                         queue = SQ_SHARED_QUEUE;
 1134                                         v &= ~LK_SHARED_WAITERS;
 1135                                 }
 1136                                 if (queue == SQ_EXCLUSIVE_QUEUE) {
 1137                                         realexslp =
 1138                                             sleepq_sleepcnt(&lk->lock_object,
 1139                                             SQ_EXCLUSIVE_QUEUE);
 1140                                         if (lk->lk_exslpfail >= realexslp) {
 1141                                                 lk->lk_exslpfail = 0;
 1142                                                 queue = SQ_SHARED_QUEUE;
 1143                                                 v &= ~LK_SHARED_WAITERS;
 1144                                                 if (realexslp != 0) {
 1145                                                         LOCK_LOG2(lk,
 1146                                         "%s: %p has only LK_SLEEPFAIL sleepers",
 1147                                                             __func__, lk);
 1148                                                         LOCK_LOG2(lk,
 1149                         "%s: %p waking up threads on the exclusive queue",
 1150                                                             __func__, lk);
 1151                                                         wakeup_swapper =
 1152                                                             sleepq_broadcast(
 1153                                                             &lk->lock_object,
 1154                                                             SLEEPQ_LK, 0,
 1155                                                             SQ_EXCLUSIVE_QUEUE);
 1156                                                 }
 1157                                         } else
 1158                                                 lk->lk_exslpfail = 0;
 1159                                 }
 1160                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
 1161                                         sleepq_release(&lk->lock_object);
 1162                                         continue;
 1163                                 }
 1164                                 LOCK_LOG3(lk,
 1165                                 "%s: %p waking up all threads on the %s queue",
 1166                                     __func__, lk, queue == SQ_SHARED_QUEUE ?
 1167                                     "shared" : "exclusive");
 1168                                 wakeup_swapper |= sleepq_broadcast(
 1169                                     &lk->lock_object, SLEEPQ_LK, 0, queue);
 1170 
 1171                                 /*
 1172                                  * If shared waiters have been woken up, we need
 1173                                  * to wait for one of them to acquire the lock
 1174                                  * before setting the exclusive waiters flag, in
 1175                                  * order to avoid a deadlock.
 1176                                  */
 1177                                 if (queue == SQ_SHARED_QUEUE) {
 1178                                         for (v = lk->lk_lock;
 1179                                             (v & LK_SHARE) && !LK_SHARERS(v);
 1180                                             v = lk->lk_lock)
 1181                                                 cpu_spinwait();
 1182                                 }
 1183                         }
 1184 
 1185                         /*
 1186                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
 1187                          * fail, loop back and retry.
 1188                          */
 1189                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
 1190                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
 1191                                     x | LK_EXCLUSIVE_WAITERS)) {
 1192                                         sleepq_release(&lk->lock_object);
 1193                                         continue;
 1194                                 }
 1195                                 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
 1196                                     __func__, lk);
 1197                         }
 1198 
 1199                         /*
 1200                          * Since we have been unable to acquire the
 1201                          * exclusive lock and the exclusive waiters flag
 1202                          * is set, we will sleep.
 1203                          */
 1204                         if (flags & LK_INTERLOCK) {
 1205                                 class->lc_unlock(ilk);
 1206                                 flags &= ~LK_INTERLOCK;
 1207                         }
 1208                         GIANT_SAVE();
 1209                         sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
 1210                             SQ_EXCLUSIVE_QUEUE);
 1211                         sleepq_wait(&lk->lock_object, ipri & PRIMASK);
 1212                         GIANT_RESTORE();
 1213                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
 1214                             __func__, lk);
 1215                 }
 1216 
 1217                 if (error == 0) {
 1218                         lock_profile_obtain_lock_success(&lk->lock_object,
 1219                             contested, waittime, file, line);
 1220                         LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
 1221                             lk->lk_recurse, file, line);
 1222                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
 1223                             LK_TRYWIT(flags), file, line);
 1224                         TD_LOCKS_INC(curthread);
 1225                         STACK_SAVE(lk);
 1226                 }
 1227                 break;
 1228         default:
 1229                 if (flags & LK_INTERLOCK)
 1230                         class->lc_unlock(ilk);
 1231                 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
 1232         }
 1233 
 1234         if (flags & LK_INTERLOCK)
 1235                 class->lc_unlock(ilk);
 1236         if (wakeup_swapper)
 1237                 kick_proc0();
 1238 
 1239         return (error);
 1240 }
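/*
 * Editor's note: the following sketch is NOT part of kern_lock.c.  It is an
 * illustrative example of how a consumer might drive the LK_DRAIN path
 * implemented above: drain the lock so no holders or waiters remain, drop
 * the resulting exclusive hold, and only then destroy the lock
 * (lockdestroy() expects the lock to be unlocked).  The structure and
 * function names are hypothetical; lockmgr(), lockdestroy() and the LK_*
 * flags are the real lockmgr(9) interface.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>

struct mydev_softc {                    /* hypothetical consumer */
        struct lock     sc_lock;        /* lockmgr lock protecting the device */
};

static void
mydev_detach(struct mydev_softc *sc)
{

        /*
         * LK_DRAIN sleeps until every holder and waiter is gone; on success
         * the caller ends up owning the lock exclusively, so it must be
         * released before lockdestroy().  An interlock could also be passed
         * together with LK_INTERLOCK, as handled in the drain code above.
         */
        (void)lockmgr(&sc->sc_lock, LK_DRAIN, NULL);
        (void)lockmgr(&sc->sc_lock, LK_RELEASE, NULL);
        lockdestroy(&sc->sc_lock);
}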
 1241 
 1242 void
 1243 _lockmgr_disown(struct lock *lk, const char *file, int line)
 1244 {
 1245         uintptr_t tid, x;
 1246 
 1247         if (SCHEDULER_STOPPED())
 1248                 return;
 1249 
 1250         tid = (uintptr_t)curthread;
 1251         _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
 1252 
 1253         /*
 1254          * If the owner is already LK_KERNPROC, just skip the whole operation.
 1255          */
 1256         if (LK_HOLDER(lk->lk_lock) != tid)
 1257                 return;
 1258         lock_profile_release_lock(&lk->lock_object);
 1259         LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
 1260         WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
 1261         TD_LOCKS_DEC(curthread);
 1262         STACK_SAVE(lk);
 1263 
 1264         /*
 1265          * In order to preserve waiters flags, just spin.
 1266          */
 1267         for (;;) {
 1268                 x = lk->lk_lock;
 1269                 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
 1270                 x &= LK_ALL_WAITERS;
 1271                 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
 1272                     LK_KERNPROC | x))
 1273                         return;
 1274                 cpu_spinwait();
 1275         }
 1276 }
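/*
 * Editor's note: the following sketch is NOT part of kern_lock.c.  It shows
 * the intended use of the lockmgr_disown() interface implemented by
 * _lockmgr_disown() above: the owning thread hands an exclusively held lock
 * over to LK_KERNPROC so that a different context (for example an I/O
 * completion path) may legally release it later.  The structure and the
 * start/done functions are hypothetical.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>

struct myxfer {                         /* hypothetical async transfer */
        struct lock     x_lock;
};

static void
myxfer_start(struct myxfer *x)
{

        (void)lockmgr(&x->x_lock, LK_EXCLUSIVE, NULL);
        /* ... hand the transfer off for asynchronous processing ... */

        /*
         * Disown the lock: curthread stops being the owner and the
         * per-thread lock accounting is dropped, so the completion routine
         * below may run in any thread.
         */
        lockmgr_disown(&x->x_lock);
}

static void
myxfer_done(struct myxfer *x)
{

        /* A lock owned by LK_KERNPROC may be released by any thread. */
        (void)lockmgr(&x->x_lock, LK_RELEASE, NULL);
}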
 1277 
 1278 void
 1279 lockmgr_printinfo(struct lock *lk)
 1280 {
 1281         struct thread *td;
 1282         uintptr_t x;
 1283 
 1284         if (lk->lk_lock == LK_UNLOCKED)
 1285                 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
 1286         else if (lk->lk_lock & LK_SHARE)
 1287                 printf("lock type %s: SHARED (count %ju)\n",
 1288                     lk->lock_object.lo_name,
 1289                     (uintmax_t)LK_SHARERS(lk->lk_lock));
 1290         else {
 1291                 td = lockmgr_xholder(lk);
 1292                 printf("lock type %s: EXCL by thread %p (pid %d)\n",
 1293                     lk->lock_object.lo_name, td, td->td_proc->p_pid);
 1294         }
 1295 
 1296         x = lk->lk_lock;
 1297         if (x & LK_EXCLUSIVE_WAITERS)
 1298                 printf(" with exclusive waiters pending\n");
 1299         if (x & LK_SHARED_WAITERS)
 1300                 printf(" with shared waiters pending\n");
 1301         if (x & LK_EXCLUSIVE_SPINNERS)
 1302                 printf(" with exclusive spinners pending\n");
 1303 
 1304         STACK_PRINT(lk);
 1305 }
 1306 
 1307 int
 1308 lockstatus(struct lock *lk)
 1309 {
 1310         uintptr_t v, x;
 1311         int ret;
 1312 
 1313         ret = LK_SHARED;
 1314         x = lk->lk_lock;
 1315         v = LK_HOLDER(x);
 1316 
 1317         if ((x & LK_SHARE) == 0) {
 1318                 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
 1319                         ret = LK_EXCLUSIVE;
 1320                 else
 1321                         ret = LK_EXCLOTHER;
 1322         } else if (x == LK_UNLOCKED)
 1323                 ret = 0;
 1324 
 1325         return (ret);
 1326 }
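/*
 * Editor's note: the following sketch is NOT part of kern_lock.c.  It shows
 * how a caller can interpret the return value of lockstatus() above:
 * LK_EXCLUSIVE (held exclusively by curthread or by LK_KERNPROC),
 * LK_EXCLOTHER (held exclusively by some other thread), LK_SHARED, or 0 for
 * an unlocked lock.  The helper name is hypothetical.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>

static const char *
mylock_state_name(struct lock *lk)
{

        switch (lockstatus(lk)) {
        case LK_EXCLUSIVE:
                return ("exclusively locked by this thread");
        case LK_EXCLOTHER:
                return ("exclusively locked by another thread");
        case LK_SHARED:
                return ("share locked");
        default:
                return ("unlocked");
        }
}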
 1327 
 1328 #ifdef INVARIANT_SUPPORT
 1329 
 1330 FEATURE(invariant_support,
 1331     "Support for modules compiled with INVARIANTS option");
 1332 
 1333 #ifndef INVARIANTS
 1334 #undef  _lockmgr_assert
 1335 #endif
 1336 
 1337 void
 1338 _lockmgr_assert(struct lock *lk, int what, const char *file, int line)
 1339 {
 1340         int slocked = 0;
 1341 
 1342         if (panicstr != NULL)
 1343                 return;
 1344         switch (what) {
 1345         case KA_SLOCKED:
 1346         case KA_SLOCKED | KA_NOTRECURSED:
 1347         case KA_SLOCKED | KA_RECURSED:
 1348                 slocked = 1;   /* FALLTHROUGH */
 1349         case KA_LOCKED:
 1350         case KA_LOCKED | KA_NOTRECURSED:
 1351         case KA_LOCKED | KA_RECURSED:
 1352 #ifdef WITNESS
 1353 
 1354                 /*
 1355                  * We cannot trust WITNESS if the lock is held in exclusive
 1356                  * mode and a call to lockmgr_disown() happened.
 1357                  * Work around this by skipping the check if the lock is
 1358                  * held in exclusive mode, even for the KA_LOCKED case.
 1359                  */
 1360                 if (slocked || (lk->lk_lock & LK_SHARE)) {
 1361                         witness_assert(&lk->lock_object, what, file, line);
 1362                         break;
 1363                 }
 1364 #endif
 1365                 if (lk->lk_lock == LK_UNLOCKED ||
 1366                     ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
 1367                     (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
 1368                         panic("Lock %s not %slocked @ %s:%d\n",
 1369                             lk->lock_object.lo_name, slocked ? "share" : "",
 1370                             file, line);
 1371 
 1372                 if ((lk->lk_lock & LK_SHARE) == 0) {
 1373                         if (lockmgr_recursed(lk)) {
 1374                                 if (what & KA_NOTRECURSED)
 1375                                         panic("Lock %s recursed @ %s:%d\n",
 1376                                             lk->lock_object.lo_name, file,
 1377                                             line);
 1378                         } else if (what & KA_RECURSED)
 1379                                 panic("Lock %s not recursed @ %s:%d\n",
 1380                                     lk->lock_object.lo_name, file, line);
 1381                 }
 1382                 break;
 1383         case KA_XLOCKED:
 1384         case KA_XLOCKED | KA_NOTRECURSED:
 1385         case KA_XLOCKED | KA_RECURSED:
 1386                 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
 1387                         panic("Lock %s not exclusively locked @ %s:%d\n",
 1388                             lk->lock_object.lo_name, file, line);
 1389                 if (lockmgr_recursed(lk)) {
 1390                         if (what & KA_NOTRECURSED)
 1391                                 panic("Lock %s recursed @ %s:%d\n",
 1392                                     lk->lock_object.lo_name, file, line);
 1393                 } else if (what & KA_RECURSED)
 1394                         panic("Lock %s not recursed @ %s:%d\n",
 1395                             lk->lock_object.lo_name, file, line);
 1396                 break;
 1397         case KA_UNLOCKED:
 1398                 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
 1399                         panic("Lock %s exclusively locked @ %s:%d\n",
 1400                             lk->lock_object.lo_name, file, line);
 1401                 break;
 1402         default:
 1403                 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
 1404                     line);
 1405         }
 1406 }
 1407 #endif
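/*
 * Editor's note: the following sketch is NOT part of kern_lock.c.  With
 * INVARIANTS enabled, the lockmgr_assert() macro maps onto
 * _lockmgr_assert() above, letting a function document and enforce its
 * locking contract, e.g. that the caller must already hold the lock
 * exclusively.  The structure and function names are hypothetical.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>

struct myobj {                          /* hypothetical locked object */
        struct lock     o_lock;
        int             o_value;
};

static void
myobj_set_value(struct myobj *o, int value)
{

        /* Panics under INVARIANTS if the caller does not hold o_lock. */
        lockmgr_assert(&o->o_lock, KA_XLOCKED);
        o->o_value = value;
}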
 1408 
 1409 #ifdef DDB
 1410 int
 1411 lockmgr_chain(struct thread *td, struct thread **ownerp)
 1412 {
 1413         struct lock *lk;
 1414 
 1415         lk = td->td_wchan;
 1416 
 1417         if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
 1418                 return (0);
 1419         db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
 1420         if (lk->lk_lock & LK_SHARE)
 1421                 db_printf("SHARED (count %ju)\n",
 1422                     (uintmax_t)LK_SHARERS(lk->lk_lock));
 1423         else
 1424                 db_printf("EXCL\n");
 1425         *ownerp = lockmgr_xholder(lk);
 1426 
 1427         return (1);
 1428 }
 1429 
 1430 static void
 1431 db_show_lockmgr(struct lock_object *lock)
 1432 {
 1433         struct thread *td;
 1434         struct lock *lk;
 1435 
 1436         lk = (struct lock *)lock;
 1437 
 1438         db_printf(" state: ");
 1439         if (lk->lk_lock == LK_UNLOCKED)
 1440                 db_printf("UNLOCKED\n");
 1441         else if (lk->lk_lock & LK_SHARE)
 1442                 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
 1443         else {
 1444                 td = lockmgr_xholder(lk);
 1445                 if (td == (struct thread *)LK_KERNPROC)
 1446                         db_printf("XLOCK: LK_KERNPROC\n");
 1447                 else
 1448                         db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
 1449                             td->td_tid, td->td_proc->p_pid,
 1450                             td->td_proc->p_comm);
 1451                 if (lockmgr_recursed(lk))
 1452                         db_printf(" recursed: %d\n", lk->lk_recurse);
 1453         }
 1454         db_printf(" waiters: ");
 1455         switch (lk->lk_lock & LK_ALL_WAITERS) {
 1456         case LK_SHARED_WAITERS:
 1457                 db_printf("shared\n");
 1458                 break;
 1459         case LK_EXCLUSIVE_WAITERS:
 1460                 db_printf("exclusive\n");
 1461                 break;
 1462         case LK_ALL_WAITERS:
 1463                 db_printf("shared and exclusive\n");
 1464                 break;
 1465         default:
 1466                 db_printf("none\n");
 1467         }
 1468         db_printf(" spinners: ");
 1469         if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
 1470                 db_printf("exclusive\n");
 1471         else
 1472                 db_printf("none\n");
 1473 }
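/*
 * Editor's note (not part of kern_lock.c): db_show_lockmgr() above is
 * reached through the lock class's lc_ddb_show hook, typically from the DDB
 * "show lock <address>" command.  Judging from the db_printf() calls above,
 * its output has roughly the following shape (values illustrative):
 *
 *       state: XLOCK: 0xfffffe0001234560 (tid 100123, pid 42, "mydaemon")
 *       waiters: shared and exclusive
 *       spinners: none
 */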
 1474 #endif
