
FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lock.c


    1 /*-
    2  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice(s), this list of conditions and the following disclaimer as
   10  *    the first lines of this file unmodified other than the possible
   11  *    addition of one or more copyright notices.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice(s), this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
   17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
   20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
   23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
   26  * DAMAGE.
   27  */
   28 
   29 #include "opt_adaptive_lockmgrs.h"
   30 #include "opt_ddb.h"
   31 #include "opt_hwpmc_hooks.h"
   32 #include "opt_kdtrace.h"
   33 
   34 #include <sys/cdefs.h>
   35 __FBSDID("$FreeBSD: releng/9.1/sys/kern/kern_lock.c 236238 2012-05-29 14:50:21Z fabient $");
   36 
   37 #include <sys/param.h>
   38 #include <sys/ktr.h>
   39 #include <sys/lock.h>
   40 #include <sys/lock_profile.h>
   41 #include <sys/lockmgr.h>
   42 #include <sys/mutex.h>
   43 #include <sys/proc.h>
   44 #include <sys/sleepqueue.h>
   45 #ifdef DEBUG_LOCKS
   46 #include <sys/stack.h>
   47 #endif
   48 #include <sys/sysctl.h>
   49 #include <sys/systm.h>
   50 
   51 #include <machine/cpu.h>
   52 
   53 #ifdef DDB
   54 #include <ddb/ddb.h>
   55 #endif
   56 
   57 #ifdef HWPMC_HOOKS
   58 #include <sys/pmckern.h>
   59 PMC_SOFT_DECLARE( , , lock, failed);
   60 #endif
   61 
   62 CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
   63     (LK_ADAPTIVE | LK_NOSHARE));
   64 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
   65     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
   66 
   67 #define SQ_EXCLUSIVE_QUEUE      0
   68 #define SQ_SHARED_QUEUE         1
   69 
   70 #ifdef ADAPTIVE_LOCKMGRS
   71 #define ALK_RETRIES             10
   72 #define ALK_LOOPS               10000
   73 #endif
   74 
   75 #ifndef INVARIANTS
   76 #define _lockmgr_assert(lk, what, file, line)
   77 #define TD_LOCKS_INC(td)
   78 #define TD_LOCKS_DEC(td)
   79 #else
   80 #define TD_LOCKS_INC(td)        ((td)->td_locks++)
   81 #define TD_LOCKS_DEC(td)        ((td)->td_locks--)
   82 #endif
   83 #define TD_SLOCKS_INC(td)       ((td)->td_lk_slocks++)
   84 #define TD_SLOCKS_DEC(td)       ((td)->td_lk_slocks--)
   85 
   86 #ifndef DEBUG_LOCKS
   87 #define STACK_PRINT(lk)
   88 #define STACK_SAVE(lk)
   89 #define STACK_ZERO(lk)
   90 #else
   91 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
   92 #define STACK_SAVE(lk)  stack_save(&(lk)->lk_stack)
   93 #define STACK_ZERO(lk)  stack_zero(&(lk)->lk_stack)
   94 #endif
   95 
   96 #define LOCK_LOG2(lk, string, arg1, arg2)                               \
   97         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
   98                 CTR2(KTR_LOCK, (string), (arg1), (arg2))
   99 #define LOCK_LOG3(lk, string, arg1, arg2, arg3)                         \
  100         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
  101                 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
  102 
  103 #define GIANT_DECLARE                                                   \
  104         int _i = 0;                                                     \
  105         WITNESS_SAVE_DECL(Giant)
  106 #define GIANT_RESTORE() do {                                            \
  107         if (_i > 0) {                                                   \
  108                 while (_i--)                                            \
  109                         mtx_lock(&Giant);                               \
  110                 WITNESS_RESTORE(&Giant.lock_object, Giant);             \
  111         }                                                               \
  112 } while (0)
  113 #define GIANT_SAVE() do {                                               \
  114         if (mtx_owned(&Giant)) {                                        \
  115                 WITNESS_SAVE(&Giant.lock_object, Giant);                \
  116                 while (mtx_owned(&Giant)) {                             \
  117                         _i++;                                           \
  118                         mtx_unlock(&Giant);                             \
  119                 }                                                       \
  120         }                                                               \
  121 } while (0)
  122 
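/*
 * A minimal sketch (hypothetical caller, mirroring sleeplk() below) of how
 * the Giant handling above is meant to be used: GIANT_SAVE() recursively
 * drops Giant and records the hold count in the _i local declared by
 * GIANT_DECLARE, and GIANT_RESTORE() reacquires it the same number of times
 * once the sleep or spin is over.
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();
 *	sleepq_wait(&lk->lock_object, pri);
 *	GIANT_RESTORE();
 */
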
  123 #define LK_CAN_SHARE(x)                                                 \
  124         (((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||      \
  125         ((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||                           \
  126         curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
  127 #define LK_TRYOP(x)                                                     \
  128         ((x) & LK_NOWAIT)
  129 
  130 #define LK_CAN_WITNESS(x)                                               \
  131         (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
  132 #define LK_TRYWIT(x)                                                    \
  133         (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
  134 
  135 #define LK_CAN_ADAPT(lk, f)                                             \
  136         (((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&             \
  137         ((f) & LK_SLEEPFAIL) == 0)
  138 
  139 #define lockmgr_disowned(lk)                                            \
  140         (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
  141 
  142 #define lockmgr_xlocked(lk)                                             \
  143         (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
  144 
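/*
 * A minimal sketch (hypothetical helper, not part of the lockmgr KPI) of the
 * masking used by lockmgr_disowned() and lockmgr_xlocked() above: lk_lock
 * packs the state into a single word, so an exclusively held lock stores the
 * owning thread pointer ORed with flag bits, while a shared lock keeps
 * LK_SHARE set and therefore can never compare equal to a thread pointer.
 *
 *	static __inline int
 *	lockmgr_xlocked_by(struct lock *lk, struct thread *td)
 *	{
 *
 *		return ((lk->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) ==
 *		    (uintptr_t)td);
 *	}
 */
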
  145 static void      assert_lockmgr(struct lock_object *lock, int how);
  146 #ifdef DDB
  147 static void      db_show_lockmgr(struct lock_object *lock);
  148 #endif
  149 static void      lock_lockmgr(struct lock_object *lock, int how);
  150 #ifdef KDTRACE_HOOKS
  151 static int       owner_lockmgr(struct lock_object *lock, struct thread **owner);
  152 #endif
  153 static int       unlock_lockmgr(struct lock_object *lock);
  154 
  155 struct lock_class lock_class_lockmgr = {
  156         .lc_name = "lockmgr",
  157         .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
  158         .lc_assert = assert_lockmgr,
  159 #ifdef DDB
  160         .lc_ddb_show = db_show_lockmgr,
  161 #endif
  162         .lc_lock = lock_lockmgr,
  163         .lc_unlock = unlock_lockmgr,
  164 #ifdef KDTRACE_HOOKS
  165         .lc_owner = owner_lockmgr,
  166 #endif
  167 };
  168 
  169 static __inline struct thread *
  170 lockmgr_xholder(struct lock *lk)
  171 {
  172         uintptr_t x;
  173 
  174         x = lk->lk_lock;
  175         return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
  176 }
  177 
  178 /*
  179  * This function assumes the sleepq_lock is held and returns with it unheld.
  180  * It also assumes that the generic interlock is sane and has previously been
  181  * checked.  If LK_INTERLOCK is specified, the interlock is not reacquired
  182  * after the sleep.
  183  */
  184 static __inline int
  185 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
  186     const char *wmesg, int pri, int timo, int queue)
  187 {
  188         GIANT_DECLARE;
  189         struct lock_class *class;
  190         int catch, error;
  191 
  192         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
  193         catch = pri & PCATCH;
  194         pri &= PRIMASK;
  195         error = 0;
  196 
  197         LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
  198             (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
  199 
  200         if (flags & LK_INTERLOCK)
  201                 class->lc_unlock(ilk);
  202         if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
  203                 lk->lk_exslpfail++;
  204         GIANT_SAVE();
  205         sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
  206             SLEEPQ_INTERRUPTIBLE : 0), queue);
  207         if ((flags & LK_TIMELOCK) && timo)
  208                 sleepq_set_timeout(&lk->lock_object, timo);
  209 
  210         /*
  211          * Decide which sleepqueue wait primitive to use.
  212          */
  213         if ((flags & LK_TIMELOCK) && timo && catch)
  214                 error = sleepq_timedwait_sig(&lk->lock_object, pri);
  215         else if ((flags & LK_TIMELOCK) && timo)
  216                 error = sleepq_timedwait(&lk->lock_object, pri);
  217         else if (catch)
  218                 error = sleepq_wait_sig(&lk->lock_object, pri);
  219         else
  220                 sleepq_wait(&lk->lock_object, pri);
  221         GIANT_RESTORE();
  222         if ((flags & LK_SLEEPFAIL) && error == 0)
  223                 error = ENOLCK;
  224 
  225         return (error);
  226 }
  227 
  228 static __inline int
  229 wakeupshlk(struct lock *lk, const char *file, int line)
  230 {
  231         uintptr_t v, x;
  232         u_int realexslp;
  233         int queue, wakeup_swapper;
  234 
  235         TD_LOCKS_DEC(curthread);
  236         TD_SLOCKS_DEC(curthread);
  237         WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
  238         LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
  239 
  240         wakeup_swapper = 0;
  241         for (;;) {
  242                 x = lk->lk_lock;
  243 
  244                 /*
  245                  * If there is more than one shared lock held, just drop one
  246                  * and return.
  247                  */
  248                 if (LK_SHARERS(x) > 1) {
  249                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
  250                             x - LK_ONE_SHARER))
  251                                 break;
  252                         continue;
  253                 }
  254 
  255                 /*
  256                  * If there are no waiters on the exclusive queue, drop the
  257                  * lock quickly.
  258                  */
  259                 if ((x & LK_ALL_WAITERS) == 0) {
  260                         MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
  261                             LK_SHARERS_LOCK(1));
  262                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
  263                                 break;
  264                         continue;
  265                 }
  266 
  267                 /*
  268                  * We should have a sharer with waiters, so enter the hard
  269                  * path in order to handle wakeups correctly.
  270                  */
  271                 sleepq_lock(&lk->lock_object);
  272                 x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
  273                 v = LK_UNLOCKED;
  274 
  275                 /*
  276                  * If the lock has exclusive waiters, give them preference in
  277                  * order to avoid deadlock with shared runners up.
  278                  * If interruptible sleeps left the exclusive queue empty,
  279                  * avoid starvation of the threads sleeping on the shared
  280                  * queue by giving them precedence and cleaning up the
  281                  * exclusive waiters bit anyway.
  282                  * Please note that the lk_exslpfail count may not reflect
  283                  * the real number of waiters with the LK_SLEEPFAIL flag on
  284                  * because they may be used in conjunction with interruptible
  285                  * sleeps, so lk_exslpfail should be considered an upper
  286                  * bound, including the edge cases.
  287                  */
  288                 realexslp = sleepq_sleepcnt(&lk->lock_object,
  289                     SQ_EXCLUSIVE_QUEUE);
  290                 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
  291                         if (lk->lk_exslpfail < realexslp) {
  292                                 lk->lk_exslpfail = 0;
  293                                 queue = SQ_EXCLUSIVE_QUEUE;
  294                                 v |= (x & LK_SHARED_WAITERS);
  295                         } else {
  296                                 lk->lk_exslpfail = 0;
  297                                 LOCK_LOG2(lk,
  298                                     "%s: %p has only LK_SLEEPFAIL sleepers",
  299                                     __func__, lk);
  300                                 LOCK_LOG2(lk,
  301                             "%s: %p waking up threads on the exclusive queue",
  302                                     __func__, lk);
  303                                 wakeup_swapper =
  304                                     sleepq_broadcast(&lk->lock_object,
  305                                     SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
  306                                 queue = SQ_SHARED_QUEUE;
  307                         }
  308                                 
  309                 } else {
  310 
  311                         /*
  312                          * Exclusive waiters sleeping with LK_SLEEPFAIL on
  313                          * and using interruptible sleeps/timeout may have
  314                          * left spurious lk_exslpfail counts on, so clean
  315                          * it up anyway.
  316                          */
  317                         lk->lk_exslpfail = 0;
  318                         queue = SQ_SHARED_QUEUE;
  319                 }
  320 
  321                 if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
  322                     v)) {
  323                         sleepq_release(&lk->lock_object);
  324                         continue;
  325                 }
  326                 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
  327                     __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
  328                     "exclusive");
  329                 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
  330                     0, queue);
  331                 sleepq_release(&lk->lock_object);
  332                 break;
  333         }
  334 
  335         lock_profile_release_lock(&lk->lock_object);
  336         return (wakeup_swapper);
  337 }
  338 
  339 static void
  340 assert_lockmgr(struct lock_object *lock, int what)
  341 {
  342 
  343         panic("lockmgr locks do not support assertions");
  344 }
  345 
  346 static void
  347 lock_lockmgr(struct lock_object *lock, int how)
  348 {
  349 
  350         panic("lockmgr locks do not support sleep interlocking");
  351 }
  352 
  353 static int
  354 unlock_lockmgr(struct lock_object *lock)
  355 {
  356 
  357         panic("lockmgr locks do not support sleep interlocking");
  358 }
  359 
  360 #ifdef KDTRACE_HOOKS
  361 static int
  362 owner_lockmgr(struct lock_object *lock, struct thread **owner)
  363 {
  364 
  365         panic("lockmgr locks do not support owner inquiring");
  366 }
  367 #endif
  368 
  369 void
  370 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
  371 {
  372         int iflags;
  373 
  374         MPASS((flags & ~LK_INIT_MASK) == 0);
  375         ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
  376             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
  377             &lk->lk_lock));
  378 
  379         iflags = LO_SLEEPABLE | LO_UPGRADABLE;
  380         if (flags & LK_CANRECURSE)
  381                 iflags |= LO_RECURSABLE;
  382         if ((flags & LK_NODUP) == 0)
  383                 iflags |= LO_DUPOK;
  384         if (flags & LK_NOPROFILE)
  385                 iflags |= LO_NOPROFILE;
  386         if ((flags & LK_NOWITNESS) == 0)
  387                 iflags |= LO_WITNESS;
  388         if (flags & LK_QUIET)
  389                 iflags |= LO_QUIET;
  390         iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
  391 
  392         lk->lk_lock = LK_UNLOCKED;
  393         lk->lk_recurse = 0;
  394         lk->lk_exslpfail = 0;
  395         lk->lk_timo = timo;
  396         lk->lk_pri = pri;
  397         lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
  398         STACK_ZERO(lk);
  399 }
  400 
  401 /*
  402  * XXX: Gross hacks to manipulate external lock flags after
  403  * initialization.  Used for certain vnode and buf locks.
  404  */
  405 void
  406 lockallowshare(struct lock *lk)
  407 {
  408 
  409         lockmgr_assert(lk, KA_XLOCKED);
  410         lk->lock_object.lo_flags &= ~LK_NOSHARE;
  411 }
  412 
  413 void
  414 lockallowrecurse(struct lock *lk)
  415 {
  416 
  417         lockmgr_assert(lk, KA_XLOCKED);
  418         lk->lock_object.lo_flags |= LO_RECURSABLE;
  419 }
  420 
  421 void
  422 lockdisablerecurse(struct lock *lk)
  423 {
  424 
  425         lockmgr_assert(lk, KA_XLOCKED);
  426         lk->lock_object.lo_flags &= ~LO_RECURSABLE;
  427 }
  428 
  429 void
  430 lockdestroy(struct lock *lk)
  431 {
  432 
  433         KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
  434         KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
  435         KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
  436         lock_destroy(&lk->lock_object);
  437 }
  438 
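/*
 * A minimal usage sketch for the initialization functions above, assuming the
 * usual lockmgr() convenience macro from <sys/lockmgr.h> and a hypothetical
 * "examplelk" consumer:
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplelk", 0, 0);
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);	(exclusive acquire)
 *	lockmgr(&lk, LK_DOWNGRADE, NULL);	(now held shared)
 *	lockmgr(&lk, LK_RELEASE, NULL);
 *	lockdestroy(&lk);
 */
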
  439 int
  440 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
  441     const char *wmesg, int pri, int timo, const char *file, int line)
  442 {
  443         GIANT_DECLARE;
  444         struct lock_class *class;
  445         const char *iwmesg;
  446         uintptr_t tid, v, x;
  447         u_int op, realexslp;
  448         int error, ipri, itimo, queue, wakeup_swapper;
  449 #ifdef LOCK_PROFILING
  450         uint64_t waittime = 0;
  451         int contested = 0;
  452 #endif
  453 #ifdef ADAPTIVE_LOCKMGRS
  454         volatile struct thread *owner;
  455         u_int i, spintries = 0;
  456 #endif
  457 
  458         error = 0;
  459         tid = (uintptr_t)curthread;
  460         op = (flags & LK_TYPE_MASK);
  461         iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
  462         ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
  463         itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
  464 
  465         MPASS((flags & ~LK_TOTAL_MASK) == 0);
  466         KASSERT((op & (op - 1)) == 0,
  467             ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
  468         KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
  469             (op != LK_DOWNGRADE && op != LK_RELEASE),
  470             ("%s: Invalid flags in regard of the operation desired @ %s:%d",
  471             __func__, file, line));
  472         KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
  473             ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
  474             __func__, file, line));
  475 
  476         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
  477         if (panicstr != NULL) {
  478                 if (flags & LK_INTERLOCK)
  479                         class->lc_unlock(ilk);
  480                 return (0);
  481         }
  482 
  483         if (lk->lock_object.lo_flags & LK_NOSHARE) {
  484                 switch (op) {
  485                 case LK_SHARED:
  486                         op = LK_EXCLUSIVE;
  487                         break;
  488                 case LK_UPGRADE:
  489                 case LK_DOWNGRADE:
  490                         _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
  491                             file, line);
  492                         return (0);
  493                 }
  494         }
  495 
  496         wakeup_swapper = 0;
  497         switch (op) {
  498         case LK_SHARED:
  499                 if (LK_CAN_WITNESS(flags))
  500                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
  501                             file, line, ilk);
  502                 for (;;) {
  503                         x = lk->lk_lock;
  504 
  505                         /*
  506                          * If no other thread has an exclusive lock, or
  507                          * no exclusive waiter is present, bump the count of
  508                          * sharers.  Since we have to preserve the state of
  509                          * waiters, if we fail to acquire the shared lock
  510                          * loop back and retry.
  511                          */
  512                         if (LK_CAN_SHARE(x)) {
  513                                 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
  514                                     x + LK_ONE_SHARER))
  515                                         break;
  516                                 continue;
  517                         }
  518 #ifdef HWPMC_HOOKS
  519                         PMC_SOFT_CALL( , , lock, failed);
  520 #endif
  521                         lock_profile_obtain_lock_failed(&lk->lock_object,
  522                             &contested, &waittime);
  523 
  524                         /*
  525                          * If the lock is already held by curthread in
  526                          * exclusive mode, avoid a deadlock.
  527                          */
  528                         if (LK_HOLDER(x) == tid) {
  529                                 LOCK_LOG2(lk,
  530                                     "%s: %p already held in exclusive mode",
  531                                     __func__, lk);
  532                                 error = EDEADLK;
  533                                 break;
  534                         }
  535 
  536                         /*
  537                          * If the operation is not allowed to sleep, just give up
  538                          * and return.
  539                          */
  540                         if (LK_TRYOP(flags)) {
  541                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
  542                                     __func__, lk);
  543                                 error = EBUSY;
  544                                 break;
  545                         }
  546 
  547 #ifdef ADAPTIVE_LOCKMGRS
  548                         /*
  549                          * If the owner is running on another CPU, spin until
  550                          * the owner stops running or the state of the lock
  551                          * changes.  We need to handle two states here
  552                          * because, for a failed acquisition, the lock can be
  553                          * held either in exclusive mode or in shared mode
  554                          * (for the writer starvation avoidance technique).
  555                          */
  556                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  557                             LK_HOLDER(x) != LK_KERNPROC) {
  558                                 owner = (struct thread *)LK_HOLDER(x);
  559                                 if (LOCK_LOG_TEST(&lk->lock_object, 0))
  560                                         CTR3(KTR_LOCK,
  561                                             "%s: spinning on %p held by %p",
  562                                             __func__, lk, owner);
  563 
  564                                 /*
  565                                  * If we are also holding an interlock, drop it
  566                                  * in order to avoid a deadlock if the lockmgr
  567                                  * owner is adaptively spinning on the
  568                                  * interlock itself.
  569                                  */
  570                                 if (flags & LK_INTERLOCK) {
  571                                         class->lc_unlock(ilk);
  572                                         flags &= ~LK_INTERLOCK;
  573                                 }
  574                                 GIANT_SAVE();
  575                                 while (LK_HOLDER(lk->lk_lock) ==
  576                                     (uintptr_t)owner && TD_IS_RUNNING(owner))
  577                                         cpu_spinwait();
  578                                 GIANT_RESTORE();
  579                                 continue;
  580                         } else if (LK_CAN_ADAPT(lk, flags) &&
  581                             (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
  582                             spintries < ALK_RETRIES) {
  583                                 if (flags & LK_INTERLOCK) {
  584                                         class->lc_unlock(ilk);
  585                                         flags &= ~LK_INTERLOCK;
  586                                 }
  587                                 GIANT_SAVE();
  588                                 spintries++;
  589                                 for (i = 0; i < ALK_LOOPS; i++) {
  590                                         if (LOCK_LOG_TEST(&lk->lock_object, 0))
  591                                                 CTR4(KTR_LOCK,
  592                                     "%s: shared spinning on %p with %u and %u",
  593                                                     __func__, lk, spintries, i);
  594                                         x = lk->lk_lock;
  595                                         if ((x & LK_SHARE) == 0 ||
  596                                             LK_CAN_SHARE(x) != 0)
  597                                                 break;
  598                                         cpu_spinwait();
  599                                 }
  600                                 GIANT_RESTORE();
  601                                 if (i != ALK_LOOPS)
  602                                         continue;
  603                         }
  604 #endif
  605 
  606                         /*
  607                          * Acquire the sleepqueue chain lock because we
  608                          * probably will need to manipulate waiters flags.
  609                          */
  610                         sleepq_lock(&lk->lock_object);
  611                         x = lk->lk_lock;
  612 
  613                         /*
  614                          * if the lock can be acquired in shared mode, try
  615                          * again.
  616                          */
  617                         if (LK_CAN_SHARE(x)) {
  618                                 sleepq_release(&lk->lock_object);
  619                                 continue;
  620                         }
  621 
  622 #ifdef ADAPTIVE_LOCKMGRS
  623                         /*
  624                          * The current lock owner might have started executing
  625                          * on another CPU (or the lock could have changed
  626                          * owner) while we were waiting on the sleepqueue
  627                          * chain lock.  If so, drop the sleepqueue lock and try
  628                          * again.
  629                          */
  630                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  631                             LK_HOLDER(x) != LK_KERNPROC) {
  632                                 owner = (struct thread *)LK_HOLDER(x);
  633                                 if (TD_IS_RUNNING(owner)) {
  634                                         sleepq_release(&lk->lock_object);
  635                                         continue;
  636                                 }
  637                         }
  638 #endif
  639 
  640                         /*
  641                          * Try to set the LK_SHARED_WAITERS flag.  If we fail,
  642                          * loop back and retry.
  643                          */
  644                         if ((x & LK_SHARED_WAITERS) == 0) {
  645                                 if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
  646                                     x | LK_SHARED_WAITERS)) {
  647                                         sleepq_release(&lk->lock_object);
  648                                         continue;
  649                                 }
  650                                 LOCK_LOG2(lk, "%s: %p set shared waiters flag",
  651                                     __func__, lk);
  652                         }
  653 
  654                         /*
  655                          * Since we have been unable to acquire the
  656                          * shared lock and the shared waiters flag is set,
  657                          * we will sleep.
  658                          */
  659                         error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
  660                             SQ_SHARED_QUEUE);
  661                         flags &= ~LK_INTERLOCK;
  662                         if (error) {
  663                                 LOCK_LOG3(lk,
  664                                     "%s: interrupted sleep for %p with %d",
  665                                     __func__, lk, error);
  666                                 break;
  667                         }
  668                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
  669                             __func__, lk);
  670                 }
  671                 if (error == 0) {
  672                         lock_profile_obtain_lock_success(&lk->lock_object,
  673                             contested, waittime, file, line);
  674                         LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
  675                             line);
  676                         WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
  677                             line);
  678                         TD_LOCKS_INC(curthread);
  679                         TD_SLOCKS_INC(curthread);
  680                         STACK_SAVE(lk);
  681                 }
  682                 break;
  683         case LK_UPGRADE:
  684                 _lockmgr_assert(lk, KA_SLOCKED, file, line);
  685                 v = lk->lk_lock;
  686                 x = v & LK_ALL_WAITERS;
  687                 v &= LK_EXCLUSIVE_SPINNERS;
  688 
  689                 /*
  690                  * Try to switch from one shared lock to an exclusive one.
  691                  * We need to preserve waiters flags during the operation.
  692                  */
  693                 if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
  694                     tid | x)) {
  695                         LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
  696                             line);
  697                         WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
  698                             LK_TRYWIT(flags), file, line);
  699                         TD_SLOCKS_DEC(curthread);
  700                         break;
  701                 }
  702 
  703                 /*
  704                  * We have been unable to succeed in upgrading, so just
  705                  * give up the shared lock.
  706                  */
  707                 wakeup_swapper |= wakeupshlk(lk, file, line);
  708 
  709                 /* FALLTHROUGH */
  710         case LK_EXCLUSIVE:
  711                 if (LK_CAN_WITNESS(flags))
  712                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
  713                             LOP_EXCLUSIVE, file, line, ilk);
  714 
  715                 /*
  716                  * If curthread already holds the lock and this one is
  717                  * allowed to recurse, simply recurse on it.
  718                  */
  719                 if (lockmgr_xlocked(lk)) {
  720                         if ((flags & LK_CANRECURSE) == 0 &&
  721                             (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
  722 
  723                                 /*
  724                                  * If this is a try operation, just give up
  725                                  * and return instead of panicking.
  726                                  */
  727                                 if (LK_TRYOP(flags)) {
  728                                         LOCK_LOG2(lk,
  729                                             "%s: %p fails the try operation",
  730                                             __func__, lk);
  731                                         error = EBUSY;
  732                                         break;
  733                                 }
  734                                 if (flags & LK_INTERLOCK)
  735                                         class->lc_unlock(ilk);
  736                 panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
  737                                     __func__, iwmesg, file, line);
  738                         }
  739                         lk->lk_recurse++;
  740                         LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
  741                         LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
  742                             lk->lk_recurse, file, line);
  743                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
  744                             LK_TRYWIT(flags), file, line);
  745                         TD_LOCKS_INC(curthread);
  746                         break;
  747                 }
  748 
  749                 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
  750                     tid)) {
  751 #ifdef HWPMC_HOOKS
  752                         PMC_SOFT_CALL( , , lock, failed);
  753 #endif
  754                         lock_profile_obtain_lock_failed(&lk->lock_object,
  755                             &contested, &waittime);
  756 
  757                         /*
  758                          * If the operation is not allowed to sleep, just give up
  759                          * and return.
  760                          */
  761                         if (LK_TRYOP(flags)) {
  762                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
  763                                     __func__, lk);
  764                                 error = EBUSY;
  765                                 break;
  766                         }
  767 
  768 #ifdef ADAPTIVE_LOCKMGRS
  769                         /*
  770                          * If the owner is running on another CPU, spin until
  771                          * the owner stops running or the state of the lock
  772                          * changes.
  773                          */
  774                         x = lk->lk_lock;
  775                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  776                             LK_HOLDER(x) != LK_KERNPROC) {
  777                                 owner = (struct thread *)LK_HOLDER(x);
  778                                 if (LOCK_LOG_TEST(&lk->lock_object, 0))
  779                                         CTR3(KTR_LOCK,
  780                                             "%s: spinning on %p held by %p",
  781                                             __func__, lk, owner);
  782 
  783                                 /*
  784                                  * If we are also holding an interlock, drop it
  785                                  * in order to avoid a deadlock if the lockmgr
  786                                  * owner is adaptively spinning on the
  787                                  * interlock itself.
  788                                  */
  789                                 if (flags & LK_INTERLOCK) {
  790                                         class->lc_unlock(ilk);
  791                                         flags &= ~LK_INTERLOCK;
  792                                 }
  793                                 GIANT_SAVE();
  794                                 while (LK_HOLDER(lk->lk_lock) ==
  795                                     (uintptr_t)owner && TD_IS_RUNNING(owner))
  796                                         cpu_spinwait();
  797                                 GIANT_RESTORE();
  798                                 continue;
  799                         } else if (LK_CAN_ADAPT(lk, flags) &&
  800                             (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
  801                             spintries < ALK_RETRIES) {
  802                                 if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
  803                                     !atomic_cmpset_ptr(&lk->lk_lock, x,
  804                                     x | LK_EXCLUSIVE_SPINNERS))
  805                                         continue;
  806                                 if (flags & LK_INTERLOCK) {
  807                                         class->lc_unlock(ilk);
  808                                         flags &= ~LK_INTERLOCK;
  809                                 }
  810                                 GIANT_SAVE();
  811                                 spintries++;
  812                                 for (i = 0; i < ALK_LOOPS; i++) {
  813                                         if (LOCK_LOG_TEST(&lk->lock_object, 0))
  814                                                 CTR4(KTR_LOCK,
  815                                     "%s: shared spinning on %p with %u and %u",
  816                                                     __func__, lk, spintries, i);
  817                                         if ((lk->lk_lock &
  818                                             LK_EXCLUSIVE_SPINNERS) == 0)
  819                                                 break;
  820                                         cpu_spinwait();
  821                                 }
  822                                 GIANT_RESTORE();
  823                                 if (i != ALK_LOOPS)
  824                                         continue;
  825                         }
  826 #endif
  827 
  828                         /*
  829                          * Acquire the sleepqueue chain lock because we
  830                          * probably will need to manipulate waiters flags.
  831                          */
  832                         sleepq_lock(&lk->lock_object);
  833                         x = lk->lk_lock;
  834 
  835                         /*
  836                          * if the lock has been released while we spun on
  837                          * the sleepqueue chain lock just try again.
  838                          */
  839                         if (x == LK_UNLOCKED) {
  840                                 sleepq_release(&lk->lock_object);
  841                                 continue;
  842                         }
  843 
  844 #ifdef ADAPTIVE_LOCKMGRS
  845                         /*
  846                          * The current lock owner might have started executing
  847                          * on another CPU (or the lock could have changed
  848                          * owner) while we were waiting on the sleepqueue
  849                          * chain lock.  If so, drop the sleepqueue lock and try
  850                          * again.
  851                          */
  852                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  853                             LK_HOLDER(x) != LK_KERNPROC) {
  854                                 owner = (struct thread *)LK_HOLDER(x);
  855                                 if (TD_IS_RUNNING(owner)) {
  856                                         sleepq_release(&lk->lock_object);
  857                                         continue;
  858                                 }
  859                         }
  860 #endif
  861 
  862                         /*
  863                          * The lock can be in the state where there is a
  864                          * pending queue of waiters, but still no owner.
  865                          * This happens when the lock is contested and an
  866                          * owner is going to claim the lock.
  867                          * If curthread succeeds in acquiring it, claim lock
  868                          * ownership and return, preserving the waiters
  869                          * flags.
  870                          */
  871                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
  872                         if ((x & ~v) == LK_UNLOCKED) {
  873                                 v &= ~LK_EXCLUSIVE_SPINNERS;
  874                                 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
  875                                     tid | v)) {
  876                                         sleepq_release(&lk->lock_object);
  877                                         LOCK_LOG2(lk,
  878                                             "%s: %p claimed by a new writer",
  879                                             __func__, lk);
  880                                         break;
  881                                 }
  882                                 sleepq_release(&lk->lock_object);
  883                                 continue;
  884                         }
  885 
  886                         /*
  887                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
  888                          * fail, loop back and retry.
  889                          */
  890                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
  891                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
  892                                     x | LK_EXCLUSIVE_WAITERS)) {
  893                                         sleepq_release(&lk->lock_object);
  894                                         continue;
  895                                 }
  896                                 LOCK_LOG2(lk, "%s: %p set excl waiters flag",
  897                                     __func__, lk);
  898                         }
  899 
  900                         /*
  901                          * Since we have been unable to acquire the
  902                          * exclusive lock and the exclusive waiters flag
  903                          * is set, we will sleep.
  904                          */
  905                         error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
  906                             SQ_EXCLUSIVE_QUEUE);
  907                         flags &= ~LK_INTERLOCK;
  908                         if (error) {
  909                                 LOCK_LOG3(lk,
  910                                     "%s: interrupted sleep for %p with %d",
  911                                     __func__, lk, error);
  912                                 break;
  913                         }
  914                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
  915                             __func__, lk);
  916                 }
  917                 if (error == 0) {
  918                         lock_profile_obtain_lock_success(&lk->lock_object,
  919                             contested, waittime, file, line);
  920                         LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
  921                             lk->lk_recurse, file, line);
  922                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
  923                             LK_TRYWIT(flags), file, line);
  924                         TD_LOCKS_INC(curthread);
  925                         STACK_SAVE(lk);
  926                 }
  927                 break;
  928         case LK_DOWNGRADE:
  929                 _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
  930                 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
  931                 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
  932                 TD_SLOCKS_INC(curthread);
  933 
  934                 /*
  935                  * In order to preserve waiters flags, just spin.
  936                  */
  937                 for (;;) {
  938                         x = lk->lk_lock;
  939                         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
  940                         x &= LK_ALL_WAITERS;
  941                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
  942                             LK_SHARERS_LOCK(1) | x))
  943                                 break;
  944                         cpu_spinwait();
  945                 }
  946                 break;
  947         case LK_RELEASE:
  948                 _lockmgr_assert(lk, KA_LOCKED, file, line);
  949                 x = lk->lk_lock;
  950 
  951                 if ((x & LK_SHARE) == 0) {
  952 
  953                         /*
  954                          * As a first option, treat the lock as if it had
  955                          * no waiters.
  956                          * Fix up the tid variable if the lock has been disowned.
  957                          */
  958                         if (LK_HOLDER(x) == LK_KERNPROC)
  959                                 tid = LK_KERNPROC;
  960                         else {
  961                                 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
  962                                     file, line);
  963                                 TD_LOCKS_DEC(curthread);
  964                         }
  965                         LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
  966                             lk->lk_recurse, file, line);
  967 
  968                         /*
  969                          * The lock is held in exclusive mode.
  970                          * If the lock is recursed also, then unrecurse it.
  971                          */
  972                         if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
  973                                 LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
  974                                     lk);
  975                                 lk->lk_recurse--;
  976                                 break;
  977                         }
  978                         if (tid != LK_KERNPROC)
  979                                 lock_profile_release_lock(&lk->lock_object);
  980 
  981                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
  982                             LK_UNLOCKED))
  983                                 break;
  984 
  985                         sleepq_lock(&lk->lock_object);
  986                         x = lk->lk_lock;
  987                         v = LK_UNLOCKED;
  988 
  989                         /*
  990                          * If the lock has exclusive waiters, give them
  991                          * preference in order to avoid deadlock with
  992                          * shared runners up.
  993                          * If interruptible sleeps left the exclusive queue
  994                          * empty, avoid starvation of the threads sleeping
  995                          * on the shared queue by giving them precedence
  996                          * and cleaning up the exclusive waiters bit anyway.
  997                          * Please note that the lk_exslpfail count may not
  998                          * reflect the real number of waiters with the
  999                          * LK_SLEEPFAIL flag on because they may be used in
 1000                          * conjunction with interruptible sleeps, so
 1001                          * lk_exslpfail should be considered an upper
 1002                          * bound, including the edge cases.
 1003                          */
 1004                         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
 1005                         realexslp = sleepq_sleepcnt(&lk->lock_object,
 1006                             SQ_EXCLUSIVE_QUEUE);
 1007                         if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
 1008                                 if (lk->lk_exslpfail < realexslp) {
 1009                                         lk->lk_exslpfail = 0;
 1010                                         queue = SQ_EXCLUSIVE_QUEUE;
 1011                                         v |= (x & LK_SHARED_WAITERS);
 1012                                 } else {
 1013                                         lk->lk_exslpfail = 0;
 1014                                         LOCK_LOG2(lk,
 1015                                         "%s: %p has only LK_SLEEPFAIL sleepers",
 1016                                             __func__, lk);
 1017                                         LOCK_LOG2(lk,
 1018                         "%s: %p waking up threads on the exclusive queue",
 1019                                             __func__, lk);
 1020                                         wakeup_swapper =
 1021                                             sleepq_broadcast(&lk->lock_object,
 1022                                             SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
 1023                                         queue = SQ_SHARED_QUEUE;
 1024                                 }
 1025                         } else {
 1026 
 1027                                 /*
 1028                                  * Exclusive waiters sleeping with LK_SLEEPFAIL
 1029                                  * on and using interruptible sleeps/timeout
 1030                                  * may have left spurious lk_exslpfail counts
 1031                                  * on, so clean it up anyway. 
 1032                                  */
 1033                                 lk->lk_exslpfail = 0;
 1034                                 queue = SQ_SHARED_QUEUE;
 1035                         }
 1036 
 1037                         LOCK_LOG3(lk,
 1038                             "%s: %p waking up threads on the %s queue",
 1039                             __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
 1040                             "exclusive");
 1041                         atomic_store_rel_ptr(&lk->lk_lock, v);
 1042                         wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
 1043                             SLEEPQ_LK, 0, queue);
 1044                         sleepq_release(&lk->lock_object);
 1045                         break;
 1046                 } else
 1047                         wakeup_swapper = wakeupshlk(lk, file, line);
 1048                 break;
 1049         case LK_DRAIN:
 1050                 if (LK_CAN_WITNESS(flags))
 1051                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
 1052                             LOP_EXCLUSIVE, file, line, ilk);
 1053 
 1054                 /*
 1055                  * Trying to drain a lock we already own will result in a
 1056                  * deadlock.
 1057                  */
 1058                 if (lockmgr_xlocked(lk)) {
 1059                         if (flags & LK_INTERLOCK)
 1060                                 class->lc_unlock(ilk);
 1061                         panic("%s: draining %s with the lock held @ %s:%d\n",
 1062                             __func__, iwmesg, file, line);
 1063                 }
 1064 
 1065                 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
 1066 #ifdef HWPMC_HOOKS
 1067                         PMC_SOFT_CALL( , , lock, failed);
 1068 #endif
 1069                         lock_profile_obtain_lock_failed(&lk->lock_object,
 1070                             &contested, &waittime);
 1071 
 1072                         /*
 1073                          * If the operation is not allowed to sleep, just give up
 1074                          * and return.
 1075                          */
 1076                         if (LK_TRYOP(flags)) {
 1077                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
 1078                                     __func__, lk);
 1079                                 error = EBUSY;
 1080                                 break;
 1081                         }
 1082 
 1083                         /*
 1084                          * Acquire the sleepqueue chain lock because we
 1085                          * probably will need to manipulate waiters flags.
 1086                          */
 1087                         sleepq_lock(&lk->lock_object);
 1088                         x = lk->lk_lock;
 1089 
 1090                         /*
 1091                          * if the lock has been released while we spun on
 1092                          * the sleepqueue chain lock just try again.
 1093                          */
 1094                         if (x == LK_UNLOCKED) {
 1095                                 sleepq_release(&lk->lock_object);
 1096                                 continue;
 1097                         }
 1098 
 1099                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
 1100                         if ((x & ~v) == LK_UNLOCKED) {
 1101                                 v = (x & ~LK_EXCLUSIVE_SPINNERS);
 1102 
 1103                                 /*
 1104                                  * If interruptible sleeps left the exclusive
 1105                                  * queue empty, avoid starvation of the
 1106                                  * threads sleeping on the shared queue by
 1107                                  * giving them precedence and cleaning up the
 1108                                  * exclusive waiters bit anyway.
 1109                                  * Please note that the lk_exslpfail count
 1110                                  * may not reflect the real number of waiters
 1111                                  * with the LK_SLEEPFAIL flag on because they
 1112                                  * may be used in conjunction with interruptible
 1113                                  * sleeps, so lk_exslpfail should be considered
 1114                                  * an upper bound, including the edge
 1115                                  * cases.
 1116                                  */
 1117                                 if (v & LK_EXCLUSIVE_WAITERS) {
 1118                                         queue = SQ_EXCLUSIVE_QUEUE;
 1119                                         v &= ~LK_EXCLUSIVE_WAITERS;
 1120                                 } else {
 1121 
 1122                                         /*
 1123                                          * Exclusive waiters sleeping with
 1124                                          * LK_SLEEPFAIL on and using
 1125                                          * interruptible sleeps/timeout may
 1126                                          * have left spurious lk_exslpfail
 1127                                          * counts on, so clean it up anyway.
 1128                                          */
 1129                                         MPASS(v & LK_SHARED_WAITERS);
 1130                                         lk->lk_exslpfail = 0;
 1131                                         queue = SQ_SHARED_QUEUE;
 1132                                         v &= ~LK_SHARED_WAITERS;
 1133                                 }
 1134                                 if (queue == SQ_EXCLUSIVE_QUEUE) {
 1135                                         realexslp =
 1136                                             sleepq_sleepcnt(&lk->lock_object,
 1137                                             SQ_EXCLUSIVE_QUEUE);
 1138                                         if (lk->lk_exslpfail >= realexslp) {
 1139                                                 lk->lk_exslpfail = 0;
 1140                                                 queue = SQ_SHARED_QUEUE;
 1141                                                 v &= ~LK_SHARED_WAITERS;
 1142                                                 if (realexslp != 0) {
 1143                                                         LOCK_LOG2(lk,
 1144                                         "%s: %p has only LK_SLEEPFAIL sleepers",
 1145                                                             __func__, lk);
 1146                                                         LOCK_LOG2(lk,
 1147                         "%s: %p waking up threads on the exclusive queue",
 1148                                                             __func__, lk);
 1149                                                         wakeup_swapper =
 1150                                                             sleepq_broadcast(
 1151                                                             &lk->lock_object,
 1152                                                             SLEEPQ_LK, 0,
 1153                                                             SQ_EXCLUSIVE_QUEUE);
 1154                                                 }
 1155                                         } else
 1156                                                 lk->lk_exslpfail = 0;
 1157                                 }
 1158                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
 1159                                         sleepq_release(&lk->lock_object);
 1160                                         continue;
 1161                                 }
 1162                                 LOCK_LOG3(lk,
 1163                                 "%s: %p waking up all threads on the %s queue",
 1164                                     __func__, lk, queue == SQ_SHARED_QUEUE ?
 1165                                     "shared" : "exclusive");
 1166                                 wakeup_swapper |= sleepq_broadcast(
 1167                                     &lk->lock_object, SLEEPQ_LK, 0, queue);
 1168 
 1169                                 /*
 1170                                  * If shared waiters have been woken up, we need
 1171                                  * to wait for one of them to acquire the lock
 1172                                  * before setting the exclusive waiters flag, in
 1173                                  * order to avoid a deadlock.
 1174                                  */
 1175                                 if (queue == SQ_SHARED_QUEUE) {
 1176                                         for (v = lk->lk_lock;
 1177                                             (v & LK_SHARE) && !LK_SHARERS(v);
 1178                                             v = lk->lk_lock)
 1179                                                 cpu_spinwait();
 1180                                 }
 1181                         }
 1182 
 1183                         /*
 1184                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
 1185                          * fail, loop back and retry.
 1186                          */
 1187                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
 1188                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
 1189                                     x | LK_EXCLUSIVE_WAITERS)) {
 1190                                         sleepq_release(&lk->lock_object);
 1191                                         continue;
 1192                                 }
 1193                                 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
 1194                                     __func__, lk);
 1195                         }
 1196 
 1197                         /*
 1198                          * Since we have been unable to acquire the
 1199                          * exclusive lock and the exclusive waiters flag
 1200                          * is set, we will sleep.
 1201                          */
 1202                         if (flags & LK_INTERLOCK) {
 1203                                 class->lc_unlock(ilk);
 1204                                 flags &= ~LK_INTERLOCK;
 1205                         }
 1206                         GIANT_SAVE();
 1207                         sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
 1208                             SQ_EXCLUSIVE_QUEUE);
 1209                         sleepq_wait(&lk->lock_object, ipri & PRIMASK);
 1210                         GIANT_RESTORE();
 1211                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
 1212                             __func__, lk);
 1213                 }
 1214 
 1215                 if (error == 0) {
 1216                         lock_profile_obtain_lock_success(&lk->lock_object,
 1217                             contested, waittime, file, line);
 1218                         LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
 1219                             lk->lk_recurse, file, line);
 1220                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
 1221                             LK_TRYWIT(flags), file, line);
 1222                         TD_LOCKS_INC(curthread);
 1223                         STACK_SAVE(lk);
 1224                 }
 1225                 break;
 1226         default:
 1227                 if (flags & LK_INTERLOCK)
 1228                         class->lc_unlock(ilk);
 1229                 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
 1230         }
 1231 
 1232         if (flags & LK_INTERLOCK)
 1233                 class->lc_unlock(ilk);
 1234         if (wakeup_swapper)
 1235                 kick_proc0();
 1236 
 1237         return (error);
 1238 }
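
/*
 * Editor's illustrative sketch (not part of kern_lock.c): the LK_DRAIN
 * request handled above is typically used to quiesce a lock before the
 * memory containing it goes away.  This is a minimal example assuming the
 * usual <sys/param.h>, <sys/lock.h> and <sys/lockmgr.h> includes; the
 * structure "example_softc", its field "sc_lock" and the function
 * "example_teardown" are invented names, while lockmgr(), lockdestroy()
 * and the LK_* flags are the real lockmgr(9) interface.
 */
struct example_softc {
        struct lock     sc_lock;        /* initialized with lockinit() */
        /* ... data protected by sc_lock ... */
};

static void
example_teardown(struct example_softc *sc)
{

        /*
         * LK_DRAIN succeeds only once every other holder and waiter has
         * left the lock, after which we own it exclusively and can
         * release and destroy it safely.
         */
        lockmgr(&sc->sc_lock, LK_DRAIN, NULL);
        lockmgr(&sc->sc_lock, LK_RELEASE, NULL);
        lockdestroy(&sc->sc_lock);
}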
 1239 
 1240 void
 1241 _lockmgr_disown(struct lock *lk, const char *file, int line)
 1242 {
 1243         uintptr_t tid, x;
 1244 
 1245         if (SCHEDULER_STOPPED())
 1246                 return;
 1247 
 1248         tid = (uintptr_t)curthread;
 1249         _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
 1250 
 1251         /*
 1252          * If the owner is already LK_KERNPROC, just skip the whole operation.
 1253          */
 1254         if (LK_HOLDER(lk->lk_lock) != tid)
 1255                 return;
 1256         lock_profile_release_lock(&lk->lock_object);
 1257         LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
 1258         WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
 1259         TD_LOCKS_DEC(curthread);
 1260         STACK_SAVE(lk);
 1261 
 1262         /*
 1263          * In order to preserve waiters flags, just spin.
 1264          */
 1265         for (;;) {
 1266                 x = lk->lk_lock;
 1267                 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
 1268                 x &= LK_ALL_WAITERS;
 1269                 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
 1270                     LK_KERNPROC | x))
 1271                         return;
 1272                 cpu_spinwait();
 1273         }
 1274 }
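
/*
 * Editor's illustrative sketch (not part of kern_lock.c): _lockmgr_disown()
 * above is reached through a thin wrapper when an exclusively held lock
 * must outlive the thread that acquired it; ownership is transferred to
 * LK_KERNPROC so that any context may issue the final release.  The name
 * "example_handoff" is invented; lockmgr() and the lockmgr_disown() wrapper
 * from <sys/lockmgr.h> are the real interface.
 */
static void
example_handoff(struct lock *lk)
{

        lockmgr(lk, LK_EXCLUSIVE, NULL);
        /* ... set up the object protected by lk ... */

        /* Keep the lock held, but stop charging it to curthread. */
        lockmgr_disown(lk);
        /* Some other thread will later do lockmgr(lk, LK_RELEASE, NULL). */
}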
 1275 
 1276 void
 1277 lockmgr_printinfo(struct lock *lk)
 1278 {
 1279         struct thread *td;
 1280         uintptr_t x;
 1281 
 1282         if (lk->lk_lock == LK_UNLOCKED)
 1283                 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
 1284         else if (lk->lk_lock & LK_SHARE)
 1285                 printf("lock type %s: SHARED (count %ju)\n",
 1286                     lk->lock_object.lo_name,
 1287                     (uintmax_t)LK_SHARERS(lk->lk_lock));
 1288         else {
 1289                 td = lockmgr_xholder(lk);
 1290                 printf("lock type %s: EXCL by thread %p (pid %d)\n",
 1291                     lk->lock_object.lo_name, td, td->td_proc->p_pid);
 1292         }
 1293 
 1294         x = lk->lk_lock;
 1295         if (x & LK_EXCLUSIVE_WAITERS)
 1296                 printf(" with exclusive waiters pending\n");
 1297         if (x & LK_SHARED_WAITERS)
 1298                 printf(" with shared waiters pending\n");
 1299         if (x & LK_EXCLUSIVE_SPINNERS)
 1300                 printf(" with exclusive spinners pending\n");
 1301 
 1302         STACK_PRINT(lk);
 1303 }
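
/*
 * Editor's illustrative sketch (not part of kern_lock.c): callers pass
 * lockmgr_printinfo() the lock they want described, typically from other
 * diagnostic paths.  "example_report" is an invented name.
 */
static void
example_report(struct lock *lk)
{

        printf("state of lockmgr lock %s:\n", lk->lock_object.lo_name);
        lockmgr_printinfo(lk);
}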
 1304 
 1305 int
 1306 lockstatus(struct lock *lk)
 1307 {
 1308         uintptr_t v, x;
 1309         int ret;
 1310 
 1311         ret = LK_SHARED;
 1312         x = lk->lk_lock;
 1313         v = LK_HOLDER(x);
 1314 
 1315         if ((x & LK_SHARE) == 0) {
 1316                 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
 1317                         ret = LK_EXCLUSIVE;
 1318                 else
 1319                         ret = LK_EXCLOTHER;
 1320         } else if (x == LK_UNLOCKED)
 1321                 ret = 0;
 1322 
 1323         return (ret);
 1324 }
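
/*
 * Editor's illustrative sketch (not part of kern_lock.c): lockstatus()
 * reports how the lock is held relative to curthread, so callers can
 * branch on its four possible answers.  "example_owned" is an invented
 * name.
 */
static int
example_owned(struct lock *lk)
{

        switch (lockstatus(lk)) {
        case LK_EXCLUSIVE:
                /* Held exclusively by curthread (or disowned to LK_KERNPROC). */
                return (1);
        case LK_SHARED:
                /* Held in shared mode by one or more threads. */
                return (1);
        case LK_EXCLOTHER:
                /* Held exclusively by some other thread. */
                return (0);
        default:
                /* 0: not held at all. */
                return (0);
        }
}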
 1325 
 1326 #ifdef INVARIANT_SUPPORT
 1327 
 1328 FEATURE(invariant_support,
 1329     "Support for modules compiled with INVARIANTS option");
 1330 
 1331 #ifndef INVARIANTS
 1332 #undef  _lockmgr_assert
 1333 #endif
 1334 
 1335 void
 1336 _lockmgr_assert(struct lock *lk, int what, const char *file, int line)
 1337 {
 1338         int slocked = 0;
 1339 
 1340         if (panicstr != NULL)
 1341                 return;
 1342         switch (what) {
 1343         case KA_SLOCKED:
 1344         case KA_SLOCKED | KA_NOTRECURSED:
 1345         case KA_SLOCKED | KA_RECURSED:
 1346                 slocked = 1;
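                      /* FALLTHROUGH: KA_SLOCKED reuses the KA_LOCKED checks below. */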
 1347         case KA_LOCKED:
 1348         case KA_LOCKED | KA_NOTRECURSED:
 1349         case KA_LOCKED | KA_RECURSED:
 1350 #ifdef WITNESS
 1351 
 1352                 /*
 1353                  * We cannot trust WITNESS if the lock is held in exclusive
 1354                  * mode and a call to lockmgr_disown() happened.
 1355                  * Work around this by skipping the check if the lock is held
 1356                  * in exclusive mode, even for the KA_LOCKED case.
 1357                  */
 1358                 if (slocked || (lk->lk_lock & LK_SHARE)) {
 1359                         witness_assert(&lk->lock_object, what, file, line);
 1360                         break;
 1361                 }
 1362 #endif
 1363                 if (lk->lk_lock == LK_UNLOCKED ||
 1364                     ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
 1365                     (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
 1366                         panic("Lock %s not %slocked @ %s:%d\n",
 1367                             lk->lock_object.lo_name, slocked ? "share" : "",
 1368                             file, line);
 1369 
 1370                 if ((lk->lk_lock & LK_SHARE) == 0) {
 1371                         if (lockmgr_recursed(lk)) {
 1372                                 if (what & KA_NOTRECURSED)
 1373                                         panic("Lock %s recursed @ %s:%d\n",
 1374                                             lk->lock_object.lo_name, file,
 1375                                             line);
 1376                         } else if (what & KA_RECURSED)
 1377                                 panic("Lock %s not recursed @ %s:%d\n",
 1378                                     lk->lock_object.lo_name, file, line);
 1379                 }
 1380                 break;
 1381         case KA_XLOCKED:
 1382         case KA_XLOCKED | KA_NOTRECURSED:
 1383         case KA_XLOCKED | KA_RECURSED:
 1384                 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
 1385                         panic("Lock %s not exclusively locked @ %s:%d\n",
 1386                             lk->lock_object.lo_name, file, line);
 1387                 if (lockmgr_recursed(lk)) {
 1388                         if (what & KA_NOTRECURSED)
 1389                                 panic("Lock %s recursed @ %s:%d\n",
 1390                                     lk->lock_object.lo_name, file, line);
 1391                 } else if (what & KA_RECURSED)
 1392                         panic("Lock %s not recursed @ %s:%d\n",
 1393                             lk->lock_object.lo_name, file, line);
 1394                 break;
 1395         case KA_UNLOCKED:
 1396                 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
 1397                         panic("Lock %s exclusively locked @ %s:%d\n",
 1398                             lk->lock_object.lo_name, file, line);
 1399                 break;
 1400         default:
 1401                 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
 1402                     line);
 1403         }
 1404 }
 1405 #endif
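
/*
 * Editor's illustrative sketch (not part of kern_lock.c): consumers assert
 * their locking preconditions through _lockmgr_assert() above (normally via
 * a convenience macro); without INVARIANTS the call compiles away.  The
 * names "example_softc" and "example_modify" are invented.
 */
static void
example_modify(struct example_softc *sc)
{

        /* Panic unless sc_lock is held exclusively (or disowned). */
        _lockmgr_assert(&sc->sc_lock, KA_XLOCKED, LOCK_FILE, LOCK_LINE);
        /* ... modify state protected by sc_lock ... */
}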
 1406 
 1407 #ifdef DDB
 1408 int
 1409 lockmgr_chain(struct thread *td, struct thread **ownerp)
 1410 {
 1411         struct lock *lk;
 1412 
 1413         lk = td->td_wchan;
 1414 
 1415         if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
 1416                 return (0);
 1417         db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
 1418         if (lk->lk_lock & LK_SHARE)
 1419                 db_printf("SHARED (count %ju)\n",
 1420                     (uintmax_t)LK_SHARERS(lk->lk_lock));
 1421         else
 1422                 db_printf("EXCL\n");
 1423         *ownerp = lockmgr_xholder(lk);
 1424 
 1425         return (1);
 1426 }
 1427 
 1428 static void
 1429 db_show_lockmgr(struct lock_object *lock)
 1430 {
 1431         struct thread *td;
 1432         struct lock *lk;
 1433 
 1434         lk = (struct lock *)lock;
 1435 
 1436         db_printf(" state: ");
 1437         if (lk->lk_lock == LK_UNLOCKED)
 1438                 db_printf("UNLOCKED\n");
 1439         else if (lk->lk_lock & LK_SHARE)
 1440                 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
 1441         else {
 1442                 td = lockmgr_xholder(lk);
 1443                 if (td == (struct thread *)LK_KERNPROC)
 1444                         db_printf("XLOCK: LK_KERNPROC\n");
 1445                 else
 1446                         db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
 1447                             td->td_tid, td->td_proc->p_pid,
 1448                             td->td_proc->p_comm);
 1449                 if (lockmgr_recursed(lk))
 1450                         db_printf(" recursed: %d\n", lk->lk_recurse);
 1451         }
 1452         db_printf(" waiters: ");
 1453         switch (lk->lk_lock & LK_ALL_WAITERS) {
 1454         case LK_SHARED_WAITERS:
 1455                 db_printf("shared\n");
 1456                 break;
 1457         case LK_EXCLUSIVE_WAITERS:
 1458                 db_printf("exclusive\n");
 1459                 break;
 1460         case LK_ALL_WAITERS:
 1461                 db_printf("shared and exclusive\n");
 1462                 break;
 1463         default:
 1464                 db_printf("none\n");
 1465         }
 1466         db_printf(" spinners: ");
 1467         if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
 1468                 db_printf("exclusive\n");
 1469         else
 1470                 db_printf("none\n");
 1471 }
 1472 #endif
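
/*
 * Editor's note (not part of kern_lock.c): db_show_lockmgr() above is wired
 * up as the lock class's lc_ddb_show hook, so a lockmgr lock can be
 * inspected from DDB with the generic "show lock <address>" command; the
 * output corresponds to the db_printf() calls above.
 */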
