FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lock.c


    1 /*-
    2  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice(s), this list of conditions and the following disclaimer as
   10  *    the first lines of this file unmodified other than the possible
   11  *    addition of one or more copyright notices.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice(s), this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
   17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
   20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
   23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
   26  * DAMAGE.
   27  */
   28 
   29 #include "opt_adaptive_lockmgrs.h"
   30 #include "opt_ddb.h"
   31 #include "opt_kdtrace.h"
   32 
   33 #include <sys/cdefs.h>
   34 __FBSDID("$FreeBSD: releng/9.0/sys/kern/kern_lock.c 226255 2011-10-11 13:45:04Z attilio $");
   35 
   36 #include <sys/param.h>
   37 #include <sys/ktr.h>
   38 #include <sys/lock.h>
   39 #include <sys/lock_profile.h>
   40 #include <sys/lockmgr.h>
   41 #include <sys/mutex.h>
   42 #include <sys/proc.h>
   43 #include <sys/sleepqueue.h>
   44 #ifdef DEBUG_LOCKS
   45 #include <sys/stack.h>
   46 #endif
   47 #include <sys/sysctl.h>
   48 #include <sys/systm.h>
   49 
   50 #include <machine/cpu.h>
   51 
   52 #ifdef DDB
   53 #include <ddb/ddb.h>
   54 #endif
   55 
   56 CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
   57     (LK_ADAPTIVE | LK_NOSHARE));
   58 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
   59     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
   60 
   61 #define SQ_EXCLUSIVE_QUEUE      0
   62 #define SQ_SHARED_QUEUE         1
   63 
   64 #ifdef ADAPTIVE_LOCKMGRS
   65 #define ALK_RETRIES             10
   66 #define ALK_LOOPS               10000
   67 #endif
   68 
   69 #ifndef INVARIANTS
   70 #define _lockmgr_assert(lk, what, file, line)
   71 #define TD_LOCKS_INC(td)
   72 #define TD_LOCKS_DEC(td)
   73 #else
   74 #define TD_LOCKS_INC(td)        ((td)->td_locks++)
   75 #define TD_LOCKS_DEC(td)        ((td)->td_locks--)
   76 #endif
   77 #define TD_SLOCKS_INC(td)       ((td)->td_lk_slocks++)
   78 #define TD_SLOCKS_DEC(td)       ((td)->td_lk_slocks--)
   79 
   80 #ifndef DEBUG_LOCKS
   81 #define STACK_PRINT(lk)
   82 #define STACK_SAVE(lk)
   83 #define STACK_ZERO(lk)
   84 #else
   85 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
   86 #define STACK_SAVE(lk)  stack_save(&(lk)->lk_stack)
   87 #define STACK_ZERO(lk)  stack_zero(&(lk)->lk_stack)
   88 #endif
   89 
   90 #define LOCK_LOG2(lk, string, arg1, arg2)                               \
   91         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
   92                 CTR2(KTR_LOCK, (string), (arg1), (arg2))
   93 #define LOCK_LOG3(lk, string, arg1, arg2, arg3)                         \
   94         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
   95                 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
   96 
   97 #define GIANT_DECLARE                                                   \
   98         int _i = 0;                                                     \
   99         WITNESS_SAVE_DECL(Giant)
  100 #define GIANT_RESTORE() do {                                            \
  101         if (_i > 0) {                                                   \
  102                 while (_i--)                                            \
  103                         mtx_lock(&Giant);                               \
  104                 WITNESS_RESTORE(&Giant.lock_object, Giant);             \
  105         }                                                               \
  106 } while (0)
  107 #define GIANT_SAVE() do {                                               \
  108         if (mtx_owned(&Giant)) {                                        \
  109                 WITNESS_SAVE(&Giant.lock_object, Giant);                \
  110                 while (mtx_owned(&Giant)) {                             \
  111                         _i++;                                           \
  112                         mtx_unlock(&Giant);                             \
  113                 }                                                       \
  114         }                                                               \
  115 } while (0)
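
The GIANT_SAVE()/GIANT_RESTORE() pair fully releases a (possibly recursed)
Giant around a sleep and later reacquires it to the same depth, keeping
WITNESS informed.  A minimal sketch of the intended pattern, mirroring what
sleeplk() below does (illustrative only, not part of this file; pause(9)
stands in for the real sleepqueue wait):

        static void
        giant_drop_example(void)
        {
                GIANT_DECLARE;             /* depth counter plus WITNESS slot */

                GIANT_SAVE();              /* unwind Giant, remembering depth */
                (void)pause("gexam", hz);  /* sleep with Giant released */
                GIANT_RESTORE();           /* relock Giant the saved depth */
        }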
  116 
  117 #define LK_CAN_SHARE(x)                                                 \
  118         (((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||      \
  119         ((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||                           \
  120         curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
  121 #define LK_TRYOP(x)                                                     \
  122         ((x) & LK_NOWAIT)
  123 
  124 #define LK_CAN_WITNESS(x)                                               \
  125         (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
  126 #define LK_TRYWIT(x)                                                    \
  127         (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
  128 
  129 #define LK_CAN_ADAPT(lk, f)                                             \
  130         (((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&             \
  131         ((f) & LK_SLEEPFAIL) == 0)
  132 
  133 #define lockmgr_disowned(lk)                                            \
  134         (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
  135 
  136 #define lockmgr_xlocked(lk)                                             \
  137         (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
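
All of this state lives in the single lk_lock word: LK_SHARE selects between
a sharer count and an owning thread pointer, and the waiter/spinner bits are
carried alongside.  A hedged sketch of a decoder for that word, using only
the LK_* accessors from <sys/lockmgr.h> (illustrative, not part of this file):

        static void
        lk_state_print(uintptr_t x)
        {
                if (x == LK_UNLOCKED)
                        printf("unlocked");
                else if (x & LK_SHARE)
                        printf("shared, %ju sharer(s)",
                            (uintmax_t)LK_SHARERS(x));
                else
                        printf("exclusive, owner %p",
                            (struct thread *)LK_HOLDER(x));
                printf(" [xwaiters=%d swaiters=%d spinners=%d]\n",
                    (x & LK_EXCLUSIVE_WAITERS) != 0,
                    (x & LK_SHARED_WAITERS) != 0,
                    (x & LK_EXCLUSIVE_SPINNERS) != 0);
        }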
  138 
   139 static void      assert_lockmgr(struct lock_object *lock, int what);
  140 #ifdef DDB
  141 static void      db_show_lockmgr(struct lock_object *lock);
  142 #endif
  143 static void      lock_lockmgr(struct lock_object *lock, int how);
  144 #ifdef KDTRACE_HOOKS
  145 static int       owner_lockmgr(struct lock_object *lock, struct thread **owner);
  146 #endif
  147 static int       unlock_lockmgr(struct lock_object *lock);
  148 
  149 struct lock_class lock_class_lockmgr = {
  150         .lc_name = "lockmgr",
  151         .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
  152         .lc_assert = assert_lockmgr,
  153 #ifdef DDB
  154         .lc_ddb_show = db_show_lockmgr,
  155 #endif
  156         .lc_lock = lock_lockmgr,
  157         .lc_unlock = unlock_lockmgr,
  158 #ifdef KDTRACE_HOOKS
  159         .lc_owner = owner_lockmgr,
  160 #endif
  161 };
  162 
  163 static __inline struct thread *
  164 lockmgr_xholder(struct lock *lk)
  165 {
  166         uintptr_t x;
  167 
  168         x = lk->lk_lock;
  169         return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
  170 }
  171 
   172 /*
   173  * Called with the sleepqueue chain lock held; returns with it released.
   174  * The generic interlock, if any, is assumed valid and already checked by
   175  * the caller.  If LK_INTERLOCK is specified, the interlock is not
   176  * reacquired after the sleep.
   177  */
  178 static __inline int
  179 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
  180     const char *wmesg, int pri, int timo, int queue)
  181 {
  182         GIANT_DECLARE;
  183         struct lock_class *class;
  184         int catch, error;
  185 
  186         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
  187         catch = pri & PCATCH;
  188         pri &= PRIMASK;
  189         error = 0;
  190 
  191         LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
  192             (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
  193 
  194         if (flags & LK_INTERLOCK)
  195                 class->lc_unlock(ilk);
  196         if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
  197                 lk->lk_exslpfail++;
  198         GIANT_SAVE();
  199         sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
  200             SLEEPQ_INTERRUPTIBLE : 0), queue);
  201         if ((flags & LK_TIMELOCK) && timo)
  202                 sleepq_set_timeout(&lk->lock_object, timo);
  203 
   204         /*
   205          * Pick the sleep primitive matching the timeout and catch flags.
   206          */
  207         if ((flags & LK_TIMELOCK) && timo && catch)
  208                 error = sleepq_timedwait_sig(&lk->lock_object, pri);
  209         else if ((flags & LK_TIMELOCK) && timo)
  210                 error = sleepq_timedwait(&lk->lock_object, pri);
  211         else if (catch)
  212                 error = sleepq_wait_sig(&lk->lock_object, pri);
  213         else
  214                 sleepq_wait(&lk->lock_object, pri);
  215         GIANT_RESTORE();
  216         if ((flags & LK_SLEEPFAIL) && error == 0)
  217                 error = ENOLCK;
  218 
  219         return (error);
  220 }
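
Note the LK_SLEEPFAIL conversion at the end: any sleep, even one that ends
normally, is turned into ENOLCK so the caller knows the protected object may
have changed while it slept.  A hedged sketch of the consumer-side retry
pattern (hypothetical names, not part of this file):

        for (;;) {
                error = lockmgr(&obj->lk, LK_EXCLUSIVE | LK_SLEEPFAIL, NULL);
                if (error == 0)
                        break;          /* acquired without sleeping */
                if (error != ENOLCK)
                        return (error); /* hard failure */
                /* We slept: revalidate obj before retrying. */
        }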
  221 
  222 static __inline int
  223 wakeupshlk(struct lock *lk, const char *file, int line)
  224 {
  225         uintptr_t v, x;
  226         u_int realexslp;
  227         int queue, wakeup_swapper;
  228 
  229         TD_LOCKS_DEC(curthread);
  230         TD_SLOCKS_DEC(curthread);
  231         WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
  232         LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
  233 
  234         wakeup_swapper = 0;
  235         for (;;) {
  236                 x = lk->lk_lock;
  237 
  238                 /*
  239                  * If there is more than one shared lock held, just drop one
  240                  * and return.
  241                  */
  242                 if (LK_SHARERS(x) > 1) {
  243                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
  244                             x - LK_ONE_SHARER))
  245                                 break;
  246                         continue;
  247                 }
  248 
  249                 /*
   250                  * If there are no waiters on the exclusive queue, drop the
  251                  * lock quickly.
  252                  */
  253                 if ((x & LK_ALL_WAITERS) == 0) {
  254                         MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
  255                             LK_SHARERS_LOCK(1));
  256                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
  257                                 break;
  258                         continue;
  259                 }
  260 
  261                 /*
  262                  * We should have a sharer with waiters, so enter the hard
  263                  * path in order to handle wakeups correctly.
  264                  */
  265                 sleepq_lock(&lk->lock_object);
  266                 x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
  267                 v = LK_UNLOCKED;
  268 
   269                 /*
   270                  * If the lock has exclusive waiters, give them preference in
   271                  * order to avoid deadlock with shared runners-up.
   272                  * If interruptible sleeps left the exclusive queue empty,
   273                  * avoid starvation of the threads sleeping on the shared
   274                  * queue by giving them precedence and clearing the
   275                  * exclusive waiters bit anyway.
   276                  * Please note that the lk_exslpfail count may overstate the
   277                  * real number of waiters with the LK_SLEEPFAIL flag on,
   278                  * because such waiters may also use interruptible sleeps;
   279                  * lk_exslpfail is thus only an upper bound, including the
   280                  * edge cases.
   281                  */
  282                 realexslp = sleepq_sleepcnt(&lk->lock_object,
  283                     SQ_EXCLUSIVE_QUEUE);
  284                 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
  285                         if (lk->lk_exslpfail < realexslp) {
  286                                 lk->lk_exslpfail = 0;
  287                                 queue = SQ_EXCLUSIVE_QUEUE;
  288                                 v |= (x & LK_SHARED_WAITERS);
  289                         } else {
  290                                 lk->lk_exslpfail = 0;
  291                                 LOCK_LOG2(lk,
  292                                     "%s: %p has only LK_SLEEPFAIL sleepers",
  293                                     __func__, lk);
  294                                 LOCK_LOG2(lk,
  295                             "%s: %p waking up threads on the exclusive queue",
  296                                     __func__, lk);
  297                                 wakeup_swapper =
  298                                     sleepq_broadcast(&lk->lock_object,
  299                                     SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
  300                                 queue = SQ_SHARED_QUEUE;
  301                         }
   302
  303                 } else {
  304 
  305                         /*
  306                          * Exclusive waiters sleeping with LK_SLEEPFAIL on
   307                          * and using interruptible sleeps/timeouts may have
   308                          * left spurious lk_exslpfail counts on, so clean
   309                          * them up anyway.
  310                          */
  311                         lk->lk_exslpfail = 0;
  312                         queue = SQ_SHARED_QUEUE;
  313                 }
  314 
  315                 if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
  316                     v)) {
  317                         sleepq_release(&lk->lock_object);
  318                         continue;
  319                 }
  320                 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
  321                     __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
  322                     "exclusive");
  323                 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
  324                     0, queue);
  325                 sleepq_release(&lk->lock_object);
  326                 break;
  327         }
  328 
  329         lock_profile_release_lock(&lk->lock_object);
  330         return (wakeup_swapper);
  331 }
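
A non-zero return reports that sleepq_broadcast() woke a swapped-out thread.
The convention, shown here as a hedged sketch of the caller side (kick_proc0()
is the hook used for this purpose elsewhere in the kernel), is to aggregate
such return values and fire the swapper wakeup only after every sleepqueue
lock has been dropped:

        wakeup_swapper = wakeupshlk(lk, file, line);
        if (wakeup_swapper)
                kick_proc0();   /* wake the swapper outside the sleepq lock */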
  332 
  333 static void
  334 assert_lockmgr(struct lock_object *lock, int what)
  335 {
  336 
  337         panic("lockmgr locks do not support assertions");
  338 }
  339 
  340 static void
  341 lock_lockmgr(struct lock_object *lock, int how)
  342 {
  343 
  344         panic("lockmgr locks do not support sleep interlocking");
  345 }
  346 
  347 static int
  348 unlock_lockmgr(struct lock_object *lock)
  349 {
  350 
  351         panic("lockmgr locks do not support sleep interlocking");
  352 }
  353 
  354 #ifdef KDTRACE_HOOKS
  355 static int
  356 owner_lockmgr(struct lock_object *lock, struct thread **owner)
  357 {
  358 
   359         panic("lockmgr locks do not support owner inquiries");
  360 }
  361 #endif
  362 
  363 void
  364 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
  365 {
  366         int iflags;
  367 
  368         MPASS((flags & ~LK_INIT_MASK) == 0);
  369         ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
  370             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
  371             &lk->lk_lock));
  372 
  373         iflags = LO_SLEEPABLE | LO_UPGRADABLE;
  374         if (flags & LK_CANRECURSE)
  375                 iflags |= LO_RECURSABLE;
  376         if ((flags & LK_NODUP) == 0)
  377                 iflags |= LO_DUPOK;
  378         if (flags & LK_NOPROFILE)
  379                 iflags |= LO_NOPROFILE;
  380         if ((flags & LK_NOWITNESS) == 0)
  381                 iflags |= LO_WITNESS;
  382         if (flags & LK_QUIET)
  383                 iflags |= LO_QUIET;
  384         iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
  385 
  386         lk->lk_lock = LK_UNLOCKED;
  387         lk->lk_recurse = 0;
  388         lk->lk_exslpfail = 0;
  389         lk->lk_timo = timo;
  390         lk->lk_pri = pri;
  391         lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
  392         STACK_ZERO(lk);
  393 }
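
For context, a typical lifecycle as seen from a lockmgr consumer; a hedged
sketch with placeholder names (PVFS is one plausible priority, and the third
lockmgr() argument is the optional interlock):

        static struct lock example_lk;

        lockinit(&example_lk, PVFS, "examlk", 0, 0);

        (void)lockmgr(&example_lk, LK_EXCLUSIVE, NULL);
        /* ... exclusive access to the protected object ... */
        (void)lockmgr(&example_lk, LK_DOWNGRADE, NULL);
        /* ... other readers may now enter ... */
        (void)lockmgr(&example_lk, LK_RELEASE, NULL);

        lockdestroy(&example_lk);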
  394 
  395 /*
  396  * XXX: Gross hacks to manipulate external lock flags after
  397  * initialization.  Used for certain vnode and buf locks.
  398  */
  399 void
  400 lockallowshare(struct lock *lk)
  401 {
  402 
  403         lockmgr_assert(lk, KA_XLOCKED);
  404         lk->lock_object.lo_flags &= ~LK_NOSHARE;
  405 }
  406 
  407 void
  408 lockallowrecurse(struct lock *lk)
  409 {
  410 
  411         lockmgr_assert(lk, KA_XLOCKED);
  412         lk->lock_object.lo_flags |= LO_RECURSABLE;
  413 }
  414 
  415 void
  416 lockdisablerecurse(struct lock *lk)
  417 {
  418 
  419         lockmgr_assert(lk, KA_XLOCKED);
  420         lk->lock_object.lo_flags &= ~LO_RECURSABLE;
  421 }
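
These are meant to be called with the lock held exclusively, typically right
after taking over a lock whose creation-time defaults do not fit.  A hedged
sketch reusing the hypothetical example_lk from above:

        (void)lockmgr(&example_lk, LK_EXCLUSIVE, NULL);
        lockallowrecurse(&example_lk);  /* permit recursion from now on */
        lockallowshare(&example_lk);    /* drop the LK_NOSHARE behaviour */
        (void)lockmgr(&example_lk, LK_RELEASE, NULL);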
  422 
  423 void
  424 lockdestroy(struct lock *lk)
  425 {
  426 
  427         KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
  428         KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
  429         KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
  430         lock_destroy(&lk->lock_object);
  431 }
  432 
  433 int
  434 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
  435     const char *wmesg, int pri, int timo, const char *file, int line)
  436 {
  437         GIANT_DECLARE;
  438         struct lock_class *class;
  439         const char *iwmesg;
  440         uintptr_t tid, v, x;
  441         u_int op, realexslp;
  442         int error, ipri, itimo, queue, wakeup_swapper;
  443 #ifdef LOCK_PROFILING
  444         uint64_t waittime = 0;
  445         int contested = 0;
  446 #endif
  447 #ifdef ADAPTIVE_LOCKMGRS
  448         volatile struct thread *owner;
  449         u_int i, spintries = 0;
  450 #endif
  451 
  452         error = 0;
  453         tid = (uintptr_t)curthread;
  454         op = (flags & LK_TYPE_MASK);
  455         iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
  456         ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
  457         itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
  458 
  459         MPASS((flags & ~LK_TOTAL_MASK) == 0);
  460         KASSERT((op & (op - 1)) == 0,
  461             ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
  462         KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
  463             (op != LK_DOWNGRADE && op != LK_RELEASE),
   464             ("%s: Invalid flags for the requested operation @ %s:%d",
  465             __func__, file, line));
  466         KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
  467             ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
  468             __func__, file, line));
  469 
  470         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
  471         if (panicstr != NULL) {
  472                 if (flags & LK_INTERLOCK)
  473                         class->lc_unlock(ilk);
  474                 return (0);
  475         }
  476 
  477         if (lk->lock_object.lo_flags & LK_NOSHARE) {
  478                 switch (op) {
  479                 case LK_SHARED:
  480                         op = LK_EXCLUSIVE;
  481                         break;
  482                 case LK_UPGRADE:
  483                 case LK_DOWNGRADE:
  484                         _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
  485                             file, line);
  486                         return (0);
  487                 }
  488         }
  489 
  490         wakeup_swapper = 0;
  491         switch (op) {
  492         case LK_SHARED:
  493                 if (LK_CAN_WITNESS(flags))
  494                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
  495                             file, line, ilk);
  496                 for (;;) {
  497                         x = lk->lk_lock;
  498 
  499                         /*
  500                          * If no other thread has an exclusive lock, or
  501                          * no exclusive waiter is present, bump the count of
   502                          * sharers.  Since we have to preserve the state of
   503                          * the waiters, we loop back and retry if we fail
   504                          * to acquire the shared lock.
  505                          */
  506                         if (LK_CAN_SHARE(x)) {
  507                                 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
  508                                     x + LK_ONE_SHARER))
  509                                         break;
  510                                 continue;
  511                         }
  512                         lock_profile_obtain_lock_failed(&lk->lock_object,
  513                             &contested, &waittime);
  514 
  515                         /*
  516                          * If the lock is already held by curthread in
  517                          * exclusive way avoid a deadlock.
   518                          * exclusive mode, avoid a deadlock.
  519                         if (LK_HOLDER(x) == tid) {
  520                                 LOCK_LOG2(lk,
  521                                     "%s: %p already held in exclusive mode",
  522                                     __func__, lk);
  523                                 error = EDEADLK;
  524                                 break;
  525                         }
  526 
  527                         /*
   528                          * If the operation is expected not to sleep, just
   529                          * give up and return.
  530                          */
  531                         if (LK_TRYOP(flags)) {
  532                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
  533                                     __func__, lk);
  534                                 error = EBUSY;
  535                                 break;
  536                         }
  537 
  538 #ifdef ADAPTIVE_LOCKMGRS
  539                         /*
  540                          * If the owner is running on another CPU, spin until
  541                          * the owner stops running or the state of the lock
   542                          * changes.  We must handle two possible states here,
   543                          * because after a failed acquisition the lock can be
   544                          * either held in exclusive mode or in shared mode
  545                          * (for the writer starvation avoidance technique).
  546                          */
  547                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  548                             LK_HOLDER(x) != LK_KERNPROC) {
  549                                 owner = (struct thread *)LK_HOLDER(x);
  550                                 if (LOCK_LOG_TEST(&lk->lock_object, 0))
  551                                         CTR3(KTR_LOCK,
  552                                             "%s: spinning on %p held by %p",
  553                                             __func__, lk, owner);
  554 
  555                                 /*
   556                          * If we are also holding an interlock, drop it
  557                                  * in order to avoid a deadlock if the lockmgr
  558                                  * owner is adaptively spinning on the
  559                                  * interlock itself.
  560                                  */
  561                                 if (flags & LK_INTERLOCK) {
  562                                         class->lc_unlock(ilk);
  563                                         flags &= ~LK_INTERLOCK;
  564                                 }
  565                                 GIANT_SAVE();
  566                                 while (LK_HOLDER(lk->lk_lock) ==
  567                                     (uintptr_t)owner && TD_IS_RUNNING(owner))
  568                                         cpu_spinwait();
  569                                 GIANT_RESTORE();
  570                                 continue;
  571                         } else if (LK_CAN_ADAPT(lk, flags) &&
  572                             (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
  573                             spintries < ALK_RETRIES) {
  574                                 if (flags & LK_INTERLOCK) {
  575                                         class->lc_unlock(ilk);
  576                                         flags &= ~LK_INTERLOCK;
  577                                 }
  578                                 GIANT_SAVE();
  579                                 spintries++;
  580                                 for (i = 0; i < ALK_LOOPS; i++) {
  581                                         if (LOCK_LOG_TEST(&lk->lock_object, 0))
  582                                                 CTR4(KTR_LOCK,
  583                                     "%s: shared spinning on %p with %u and %u",
  584                                                     __func__, lk, spintries, i);
  585                                         x = lk->lk_lock;
  586                                         if ((x & LK_SHARE) == 0 ||
  587                                             LK_CAN_SHARE(x) != 0)
  588                                                 break;
  589                                         cpu_spinwait();
  590                                 }
  591                                 GIANT_RESTORE();
  592                                 if (i != ALK_LOOPS)
  593                                         continue;
  594                         }
  595 #endif
  596 
  597                         /*
  598                          * Acquire the sleepqueue chain lock because we
   599                          * will probably need to manipulate the waiters flags.
  600                          */
  601                         sleepq_lock(&lk->lock_object);
  602                         x = lk->lk_lock;
  603 
  604                         /*
   605                          * If the lock can be acquired in shared mode, try
  606                          * again.
  607                          */
  608                         if (LK_CAN_SHARE(x)) {
  609                                 sleepq_release(&lk->lock_object);
  610                                 continue;
  611                         }
  612 
  613 #ifdef ADAPTIVE_LOCKMGRS
  614                         /*
  615                          * The current lock owner might have started executing
  616                          * on another CPU (or the lock could have changed
   617                          * owner) while we were waiting on the sleepqueue
   618                          * chain lock.  If so, drop the sleepqueue lock and try
  619                          * again.
  620                          */
  621                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  622                             LK_HOLDER(x) != LK_KERNPROC) {
  623                                 owner = (struct thread *)LK_HOLDER(x);
  624                                 if (TD_IS_RUNNING(owner)) {
  625                                         sleepq_release(&lk->lock_object);
  626                                         continue;
  627                                 }
  628                         }
  629 #endif
  630 
  631                         /*
  632                          * Try to set the LK_SHARED_WAITERS flag.  If we fail,
  633                          * loop back and retry.
  634                          */
  635                         if ((x & LK_SHARED_WAITERS) == 0) {
  636                                 if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
  637                                     x | LK_SHARED_WAITERS)) {
  638                                         sleepq_release(&lk->lock_object);
  639                                         continue;
  640                                 }
  641                                 LOCK_LOG2(lk, "%s: %p set shared waiters flag",
  642                                     __func__, lk);
  643                         }
  644 
  645                         /*
   646                          * Since we have been unable to acquire the
  647                          * shared lock and the shared waiters flag is set,
  648                          * we will sleep.
  649                          */
  650                         error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
  651                             SQ_SHARED_QUEUE);
  652                         flags &= ~LK_INTERLOCK;
  653                         if (error) {
  654                                 LOCK_LOG3(lk,
  655                                     "%s: interrupted sleep for %p with %d",
  656                                     __func__, lk, error);
  657                                 break;
  658                         }
  659                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
  660                             __func__, lk);
  661                 }
  662                 if (error == 0) {
  663                         lock_profile_obtain_lock_success(&lk->lock_object,
  664                             contested, waittime, file, line);
  665                         LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
  666                             line);
  667                         WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
  668                             line);
  669                         TD_LOCKS_INC(curthread);
  670                         TD_SLOCKS_INC(curthread);
  671                         STACK_SAVE(lk);
  672                 }
  673                 break;
  674         case LK_UPGRADE:
  675                 _lockmgr_assert(lk, KA_SLOCKED, file, line);
  676                 v = lk->lk_lock;
  677                 x = v & LK_ALL_WAITERS;
  678                 v &= LK_EXCLUSIVE_SPINNERS;
  679 
  680                 /*
  681                  * Try to switch from one shared lock to an exclusive one.
  682                  * We need to preserve waiters flags during the operation.
  683                  */
  684                 if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
  685                     tid | x)) {
  686                         LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
  687                             line);
  688                         WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
  689                             LK_TRYWIT(flags), file, line);
  690                         TD_SLOCKS_DEC(curthread);
  691                         break;
  692                 }
  693 
  694                 /*
   695                  * The upgrade attempt failed, so just
  696                  * give up the shared lock.
  697                  */
  698                 wakeup_swapper |= wakeupshlk(lk, file, line);
  699 
  700                 /* FALLTHROUGH */
  701         case LK_EXCLUSIVE:
  702                 if (LK_CAN_WITNESS(flags))
  703                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
  704                             LOP_EXCLUSIVE, file, line, ilk);
  705 
  706                 /*
  707                  * If curthread already holds the lock and this one is
  708                  * allowed to recurse, simply recurse on it.
  709                  */
  710                 if (lockmgr_xlocked(lk)) {
  711                         if ((flags & LK_CANRECURSE) == 0 &&
  712                             (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
  713 
  714                                 /*
   715                                  * If this is a try operation, just give up
   716                                  * and return instead of panicking.
  717                                  */
  718                                 if (LK_TRYOP(flags)) {
  719                                         LOCK_LOG2(lk,
  720                                             "%s: %p fails the try operation",
  721                                             __func__, lk);
  722                                         error = EBUSY;
  723                                         break;
  724                                 }
  725                                 if (flags & LK_INTERLOCK)
  726                                         class->lc_unlock(ilk);
   727                 panic("%s: recursing on non-recursive lockmgr %s @ %s:%d\n",
  728                                     __func__, iwmesg, file, line);
  729                         }
  730                         lk->lk_recurse++;
  731                         LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
  732                         LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
  733                             lk->lk_recurse, file, line);
  734                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
  735                             LK_TRYWIT(flags), file, line);
  736                         TD_LOCKS_INC(curthread);
  737                         break;
  738                 }
  739 
  740                 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
  741                     tid)) {
  742                         lock_profile_obtain_lock_failed(&lk->lock_object,
  743                             &contested, &waittime);
  744 
  745                         /*
   746                          * If the operation is expected not to sleep, just
   747                          * give up and return.
  748                          */
  749                         if (LK_TRYOP(flags)) {
  750                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
  751                                     __func__, lk);
  752                                 error = EBUSY;
  753                                 break;
  754                         }
  755 
  756 #ifdef ADAPTIVE_LOCKMGRS
  757                         /*
  758                          * If the owner is running on another CPU, spin until
  759                          * the owner stops running or the state of the lock
  760                          * changes.
  761                          */
  762                         x = lk->lk_lock;
  763                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  764                             LK_HOLDER(x) != LK_KERNPROC) {
  765                                 owner = (struct thread *)LK_HOLDER(x);
  766                                 if (LOCK_LOG_TEST(&lk->lock_object, 0))
  767                                         CTR3(KTR_LOCK,
  768                                             "%s: spinning on %p held by %p",
  769                                             __func__, lk, owner);
  770 
  771                                 /*
   772                                  * If we are also holding an interlock, drop it
  773                                  * in order to avoid a deadlock if the lockmgr
  774                                  * owner is adaptively spinning on the
  775                                  * interlock itself.
  776                                  */
  777                                 if (flags & LK_INTERLOCK) {
  778                                         class->lc_unlock(ilk);
  779                                         flags &= ~LK_INTERLOCK;
  780                                 }
  781                                 GIANT_SAVE();
  782                                 while (LK_HOLDER(lk->lk_lock) ==
  783                                     (uintptr_t)owner && TD_IS_RUNNING(owner))
  784                                         cpu_spinwait();
  785                                 GIANT_RESTORE();
  786                                 continue;
  787                         } else if (LK_CAN_ADAPT(lk, flags) &&
  788                             (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
  789                             spintries < ALK_RETRIES) {
  790                                 if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
  791                                     !atomic_cmpset_ptr(&lk->lk_lock, x,
  792                                     x | LK_EXCLUSIVE_SPINNERS))
  793                                         continue;
  794                                 if (flags & LK_INTERLOCK) {
  795                                         class->lc_unlock(ilk);
  796                                         flags &= ~LK_INTERLOCK;
  797                                 }
  798                                 GIANT_SAVE();
  799                                 spintries++;
  800                                 for (i = 0; i < ALK_LOOPS; i++) {
  801                                         if (LOCK_LOG_TEST(&lk->lock_object, 0))
  802                                                 CTR4(KTR_LOCK,
   803                                     "%s: exclusive spinning on %p with %u and %u",
  804                                                     __func__, lk, spintries, i);
  805                                         if ((lk->lk_lock &
  806                                             LK_EXCLUSIVE_SPINNERS) == 0)
  807                                                 break;
  808                                         cpu_spinwait();
  809                                 }
  810                                 GIANT_RESTORE();
  811                                 if (i != ALK_LOOPS)
  812                                         continue;
  813                         }
  814 #endif
  815 
  816                         /*
  817                          * Acquire the sleepqueue chain lock because we
   818                          * will probably need to manipulate the waiters flags.
  819                          */
  820                         sleepq_lock(&lk->lock_object);
  821                         x = lk->lk_lock;
  822 
  823                         /*
   824                          * If the lock has been released while we spun on
   825                          * the sleepqueue chain lock, just try again.
  826                          */
  827                         if (x == LK_UNLOCKED) {
  828                                 sleepq_release(&lk->lock_object);
  829                                 continue;
  830                         }
  831 
  832 #ifdef ADAPTIVE_LOCKMGRS
  833                         /*
  834                          * The current lock owner might have started executing
  835                          * on another CPU (or the lock could have changed
   836                          * owner) while we were waiting on the sleepqueue
   837                          * chain lock.  If so, drop the sleepqueue lock and try
  838                          * again.
  839                          */
  840                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  841                             LK_HOLDER(x) != LK_KERNPROC) {
  842                                 owner = (struct thread *)LK_HOLDER(x);
  843                                 if (TD_IS_RUNNING(owner)) {
  844                                         sleepq_release(&lk->lock_object);
  845                                         continue;
  846                                 }
  847                         }
  848 #endif
  849 
  850                         /*
  851                          * The lock can be in the state where there is a
  852                          * pending queue of waiters, but still no owner.
  853                          * This happens when the lock is contested and an
  854                          * owner is going to claim the lock.
   855                          * If curthread succeeds in acquiring it, claim
   856                          * lock ownership and return, preserving the waiters
   857                          * flags.
  858                          */
  859                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
  860                         if ((x & ~v) == LK_UNLOCKED) {
  861                                 v &= ~LK_EXCLUSIVE_SPINNERS;
  862                                 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
  863                                     tid | v)) {
  864                                         sleepq_release(&lk->lock_object);
  865                                         LOCK_LOG2(lk,
  866                                             "%s: %p claimed by a new writer",
  867                                             __func__, lk);
  868                                         break;
  869                                 }
  870                                 sleepq_release(&lk->lock_object);
  871                                 continue;
  872                         }
  873 
  874                         /*
  875                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
  876                          * fail, loop back and retry.
  877                          */
  878                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
  879                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
  880                                     x | LK_EXCLUSIVE_WAITERS)) {
  881                                         sleepq_release(&lk->lock_object);
  882                                         continue;
  883                                 }
  884                                 LOCK_LOG2(lk, "%s: %p set excl waiters flag",
  885                                     __func__, lk);
  886                         }
  887 
  888                         /*
   889                          * Since we have been unable to acquire the
  890                          * exclusive lock and the exclusive waiters flag
  891                          * is set, we will sleep.
  892                          */
  893                         error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
  894                             SQ_EXCLUSIVE_QUEUE);
  895                         flags &= ~LK_INTERLOCK;
  896                         if (error) {
  897                                 LOCK_LOG3(lk,
  898                                     "%s: interrupted sleep for %p with %d",
  899                                     __func__, lk, error);
  900                                 break;
  901                         }
  902                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
  903                             __func__, lk);
  904                 }
  905                 if (error == 0) {
  906                         lock_profile_obtain_lock_success(&lk->lock_object,
  907                             contested, waittime, file, line);
  908                         LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
  909                             lk->lk_recurse, file, line);
  910                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
  911                             LK_TRYWIT(flags), file, line);
  912                         TD_LOCKS_INC(curthread);
  913                         STACK_SAVE(lk);
  914                 }
  915                 break;
  916         case LK_DOWNGRADE:
  917                 _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
  918                 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
  919                 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
  920                 TD_SLOCKS_INC(curthread);
  921 
  922                 /*
  923                  * In order to preserve waiters flags, just spin.
  924                  */
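                /*
                 * The transition is tid | waiters -> LK_SHARERS_LOCK(1) |
                 * waiters: exclusive ownership by curthread becomes a
                 * single-sharer count, with the waiter bits carried over
                 * untouched.
                 */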
  925                 for (;;) {
  926                         x = lk->lk_lock;
  927                         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
  928                         x &= LK_ALL_WAITERS;
  929                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
  930                             LK_SHARERS_LOCK(1) | x))
  931                                 break;
  932                         cpu_spinwait();
  933                 }
  934                 break;
  935         case LK_RELEASE:
  936                 _lockmgr_assert(lk, KA_LOCKED, file, line);
  937                 x = lk->lk_lock;
  938 
  939                 if ((x & LK_SHARE) == 0) {
  940 
  941                         /*
   942                          * As a first step, treat the lock as if it had no
   943                          * waiters.
   944                          * Fix up the tid variable if the lock has been disowned.
  945                          */
  946                         if (LK_HOLDER(x) == LK_KERNPROC)
  947                                 tid = LK_KERNPROC;
  948                         else {
  949                                 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
  950                                     file, line);
  951                                 TD_LOCKS_DEC(curthread);
  952                         }
  953                         LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
  954                             lk->lk_recurse, file, line);
  955 
  956                         /*
  957                          * The lock is held in exclusive mode.
  958                          * If the lock is recursed also, then unrecurse it.
  959                          */
  960                         if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
  961                                 LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
  962                                     lk);
  963                                 lk->lk_recurse--;
  964                                 break;
  965                         }
  966                         if (tid != LK_KERNPROC)
  967                                 lock_profile_release_lock(&lk->lock_object);
  968 
  969                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
  970                             LK_UNLOCKED))
  971                                 break;
  972 
  973                         sleepq_lock(&lk->lock_object);
  974                         x = lk->lk_lock;
  975                         v = LK_UNLOCKED;
  976 
   977                         /*
   978                          * If the lock has exclusive waiters, give them
   979                          * preference in order to avoid deadlock with
   980                          * shared runners-up.
   981                          * If interruptible sleeps left the exclusive queue
   982                          * empty, avoid starvation of the threads sleeping
   983                          * on the shared queue by giving them precedence
   984                          * and clearing the exclusive waiters bit anyway.
   985                          * Please note that the lk_exslpfail count may
   986                          * overstate the real number of waiters with the
   987                          * LK_SLEEPFAIL flag on, because such waiters may
   988                          * also use interruptible sleeps; lk_exslpfail is
   989                          * thus only an upper bound, including the edge
   990                          * cases.
   991                          */
  992                         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
  993                         realexslp = sleepq_sleepcnt(&lk->lock_object,
  994                             SQ_EXCLUSIVE_QUEUE);
  995                         if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
  996                                 if (lk->lk_exslpfail < realexslp) {
  997                                         lk->lk_exslpfail = 0;
  998                                         queue = SQ_EXCLUSIVE_QUEUE;
  999                                         v |= (x & LK_SHARED_WAITERS);
 1000                                 } else {
 1001                                         lk->lk_exslpfail = 0;
 1002                                         LOCK_LOG2(lk,
 1003                                         "%s: %p has only LK_SLEEPFAIL sleepers",
 1004                                             __func__, lk);
 1005                                         LOCK_LOG2(lk,
 1006                         "%s: %p waking up threads on the exclusive queue",
 1007                                             __func__, lk);
 1008                                         wakeup_swapper =
 1009                                             sleepq_broadcast(&lk->lock_object,
 1010                                             SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
 1011                                         queue = SQ_SHARED_QUEUE;
 1012                                 }
 1013                         } else {
 1014 
 1015                                 /*
 1016                                  * Exclusive waiters sleeping with LK_SLEEPFAIL
  1017                                  * on and using interruptible sleeps/timeouts
  1018                                  * may have left spurious lk_exslpfail counts
  1019                                  * on, so clean them up anyway.
 1020                                  */
 1021                                 lk->lk_exslpfail = 0;
 1022                                 queue = SQ_SHARED_QUEUE;
 1023                         }
 1024 
 1025                         LOCK_LOG3(lk,
 1026                             "%s: %p waking up threads on the %s queue",
 1027                             __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
 1028                             "exclusive");
 1029                         atomic_store_rel_ptr(&lk->lk_lock, v);
 1030                         wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
 1031                             SLEEPQ_LK, 0, queue);
 1032                         sleepq_release(&lk->lock_object);
 1033                         break;
 1034                 } else
 1035                         wakeup_swapper = wakeupshlk(lk, file, line);
 1036                 break;
 1037         case LK_DRAIN:
 1038                 if (LK_CAN_WITNESS(flags))
 1039                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
 1040                             LOP_EXCLUSIVE, file, line, ilk);
 1041 
 1042                 /*
 1043                  * Trying to drain a lock we already own will result in a
 1044                  * deadlock.
 1045                  */
 1046                 if (lockmgr_xlocked(lk)) {
 1047                         if (flags & LK_INTERLOCK)
 1048                                 class->lc_unlock(ilk);
 1049                         panic("%s: draining %s with the lock held @ %s:%d\n",
 1050                             __func__, iwmesg, file, line);
 1051                 }
 1052 
 1053                 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
 1054                         lock_profile_obtain_lock_failed(&lk->lock_object,
 1055                             &contested, &waittime);
 1056 
 1057                         /*
 1058                          * If the lock is expected to not sleep just give up
 1059                          * and return.
 1060                          */
 1061                         if (LK_TRYOP(flags)) {
 1062                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
 1063                                     __func__, lk);
 1064                                 error = EBUSY;
 1065                                 break;
 1066                         }
 1067 
 1068                         /*
 1069                          * Acquire the sleepqueue chain lock because we
  1070                          * will probably need to manipulate the waiters flags.
 1071                          */
 1072                         sleepq_lock(&lk->lock_object);
 1073                         x = lk->lk_lock;
 1074 
 1075                         /*
  1076                          * If the lock has been released while we spun on
  1077                          * the sleepqueue chain lock, just try again.
 1078                          */
 1079                         if (x == LK_UNLOCKED) {
 1080                                 sleepq_release(&lk->lock_object);
 1081                                 continue;
 1082                         }
 1083 
 1084                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
 1085                         if ((x & ~v) == LK_UNLOCKED) {
 1086                                 v = (x & ~LK_EXCLUSIVE_SPINNERS);
 1087 
 1088                                 /*
 1089                                  * If interruptible sleeps left the exclusive
  1090                                  * queue empty, avoid starvation of the
  1091                                  * threads sleeping on the shared queue by
  1092                                  * giving them precedence and clearing the
  1093                                  * exclusive waiters bit anyway.
  1094                                  * Please note that the lk_exslpfail count may
  1095                                  * overstate the real number of waiters with
  1096                                  * the LK_SLEEPFAIL flag on, because such
  1097                                  * waiters may also use interruptible sleeps;
  1098                                  * lk_exslpfail is thus only an upper bound,
  1099                                  * including the edge
 1100                                  * cases.
 1101                                  */
 1102                                 if (v & LK_EXCLUSIVE_WAITERS) {
 1103                                         queue = SQ_EXCLUSIVE_QUEUE;
 1104                                         v &= ~LK_EXCLUSIVE_WAITERS;
 1105                                 } else {
 1106 
 1107                                         /*
 1108                                          * Exclusive waiters sleeping with
 1109                                          * LK_SLEEPFAIL on and using
  1110                                          * interruptible sleeps/timeouts may
  1111                                          * have left spurious lk_exslpfail
  1112                                          * counts on, so clean them up anyway.
 1113                                          */
 1114                                         MPASS(v & LK_SHARED_WAITERS);
 1115                                         lk->lk_exslpfail = 0;
 1116                                         queue = SQ_SHARED_QUEUE;
 1117                                         v &= ~LK_SHARED_WAITERS;
 1118                                 }
 1119                                 if (queue == SQ_EXCLUSIVE_QUEUE) {
 1120                                         realexslp =
 1121                                             sleepq_sleepcnt(&lk->lock_object,
 1122                                             SQ_EXCLUSIVE_QUEUE);
 1123                                         if (lk->lk_exslpfail >= realexslp) {
 1124                                                 lk->lk_exslpfail = 0;
 1125                                                 queue = SQ_SHARED_QUEUE;
 1126                                                 v &= ~LK_SHARED_WAITERS;
 1127                                                 if (realexslp != 0) {
 1128                                                         LOCK_LOG2(lk,
 1129                                         "%s: %p has only LK_SLEEPFAIL sleepers",
 1130                                                             __func__, lk);
 1131                                                         LOCK_LOG2(lk,
 1132                         "%s: %p waking up threads on the exclusive queue",
 1133                                                             __func__, lk);
 1134                                                         wakeup_swapper =
 1135                                                             sleepq_broadcast(
 1136                                                             &lk->lock_object,
 1137                                                             SLEEPQ_LK, 0,
 1138                                                             SQ_EXCLUSIVE_QUEUE);
 1139                                                 }
 1140                                         } else
 1141                                                 lk->lk_exslpfail = 0;
 1142                                 }
 1143                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
 1144                                         sleepq_release(&lk->lock_object);
 1145                                         continue;
 1146                                 }
 1147                                 LOCK_LOG3(lk,
 1148                                 "%s: %p waking up all threads on the %s queue",
 1149                                     __func__, lk, queue == SQ_SHARED_QUEUE ?
 1150                                     "shared" : "exclusive");
 1151                                 wakeup_swapper |= sleepq_broadcast(
 1152                                     &lk->lock_object, SLEEPQ_LK, 0, queue);
 1153 
 1154                                 /*
 1155                                  * If shared waiters have been woken up, we
 1156                                  * need to wait for one of them to acquire
 1157                                  * the lock before setting the exclusive
 1158                                  * waiters flag in order to avoid a deadlock.
 1159                                  */
 1160                                 if (queue == SQ_SHARED_QUEUE) {
 1161                                         for (v = lk->lk_lock;
 1162                                             (v & LK_SHARE) && !LK_SHARERS(v);
 1163                                             v = lk->lk_lock)
 1164                                                 cpu_spinwait();
 1165                                 }
 1166                         }
 1167 
 1168                         /*
 1169                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
 1170                          * fail, loop back and retry.
 1171                          */
 1172                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
 1173                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
 1174                                     x | LK_EXCLUSIVE_WAITERS)) {
 1175                                         sleepq_release(&lk->lock_object);
 1176                                         continue;
 1177                                 }
 1178                                 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
 1179                                     __func__, lk);
 1180                         }
 1181 
 1182                         /*
 1183                          * Since we have been unable to acquire the
 1184                          * exclusive lock and the exclusive waiters flag
 1185                          * is set, we will sleep.
 1186                          */
 1187                         if (flags & LK_INTERLOCK) {
 1188                                 class->lc_unlock(ilk);
 1189                                 flags &= ~LK_INTERLOCK;
 1190                         }
 1191                         GIANT_SAVE();
 1192                         sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
 1193                             SQ_EXCLUSIVE_QUEUE);
 1194                         sleepq_wait(&lk->lock_object, ipri & PRIMASK);
 1195                         GIANT_RESTORE();
 1196                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
 1197                             __func__, lk);
 1198                 }
 1199 
 1200                 if (error == 0) {
 1201                         lock_profile_obtain_lock_success(&lk->lock_object,
 1202                             contested, waittime, file, line);
 1203                         LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
 1204                             lk->lk_recurse, file, line);
 1205                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
 1206                             LK_TRYWIT(flags), file, line);
 1207                         TD_LOCKS_INC(curthread);
 1208                         STACK_SAVE(lk);
 1209                 }
 1210                 break;
 1211         default:
 1212                 if (flags & LK_INTERLOCK)
 1213                         class->lc_unlock(ilk);
 1214                 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
 1215         }
 1216 
 1217         if (flags & LK_INTERLOCK)
 1218                 class->lc_unlock(ilk);
 1219         if (wakeup_swapper)
 1220                 kick_proc0();
 1221 
 1222         return (error);
 1223 }
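
      /*
       * Illustrative sketch (not from the original kern_lock.c): the
       * LK_DRAIN case handled above waits until no other thread holds or
       * waits for the lock, which is typically wanted just before a lock
       * is destroyed.  Assuming an already initialized "struct lock lk",
       * a teardown sequence might look like:
       *
       *	lockmgr(&lk, LK_DRAIN, NULL);	drain holders and waiters
       *	lockmgr(&lk, LK_RELEASE, NULL);	drop the drain-time hold
       *	lockdestroy(&lk);		tear the lock down
       */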
 1224 
 1225 void
 1226 _lockmgr_disown(struct lock *lk, const char *file, int line)
 1227 {
 1228         uintptr_t tid, x;
 1229 
 1230         tid = (uintptr_t)curthread;
 1231         _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
 1232 
 1233         /*
 1234          * If the owner is already LK_KERNPROC, just skip the whole operation.
 1235          */
 1236         if (LK_HOLDER(lk->lk_lock) != tid)
 1237                 return;
 1238         lock_profile_release_lock(&lk->lock_object);
 1239         LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
 1240         WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
 1241         TD_LOCKS_DEC(curthread);
 1242         STACK_SAVE(lk);
 1243 
 1244         /*
 1245          * In order to preserve the waiters flags, just spin.
 1246          */
 1247         for (;;) {
 1248                 x = lk->lk_lock;
 1249                 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
 1250                 x &= LK_ALL_WAITERS;
 1251                 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
 1252                     LK_KERNPROC | x))
 1253                         return;
 1254                 cpu_spinwait();
 1255         }
 1256 }
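
      /*
       * Illustrative sketch (not from the original kern_lock.c): disowning
       * hands an exclusively held lock over to LK_KERNPROC so that a context
       * other than the original owner may release it, e.g. on I/O
       * completion.  Callers normally go through the lockmgr_disown()
       * wrapper, which supplies file and line:
       *
       *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
       *	lockmgr_disown(&lk);		owner is now LK_KERNPROC
       *	...
       *	lockmgr(&lk, LK_RELEASE, NULL);	may run in another thread
       */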
 1257 
 1258 void
 1259 lockmgr_printinfo(struct lock *lk)
 1260 {
 1261         struct thread *td;
 1262         uintptr_t x;
 1263 
 1264         if (lk->lk_lock == LK_UNLOCKED)
 1265                 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
 1266         else if (lk->lk_lock & LK_SHARE)
 1267                 printf("lock type %s: SHARED (count %ju)\n",
 1268                     lk->lock_object.lo_name,
 1269                     (uintmax_t)LK_SHARERS(lk->lk_lock));
 1270         else {
 1271                 td = lockmgr_xholder(lk);
 1272                 printf("lock type %s: EXCL by thread %p (pid %d)\n",
 1273                     lk->lock_object.lo_name, td, td->td_proc->p_pid);
 1274         }
 1275 
 1276         x = lk->lk_lock;
 1277         if (x & LK_EXCLUSIVE_WAITERS)
 1278                 printf(" with exclusive waiters pending\n");
 1279         if (x & LK_SHARED_WAITERS)
 1280                 printf(" with shared waiters pending\n");
 1281         if (x & LK_EXCLUSIVE_SPINNERS)
 1282                 printf(" with exclusive spinners pending\n");
 1283 
 1284         STACK_PRINT(lk);
 1285 }
 1286 
 1287 int
 1288 lockstatus(struct lock *lk)
 1289 {
 1290         uintptr_t v, x;
 1291         int ret;
 1292 
 1293         ret = LK_SHARED;
 1294         x = lk->lk_lock;
 1295         v = LK_HOLDER(x);
 1296 
 1297         if ((x & LK_SHARE) == 0) {
 1298                 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
 1299                         ret = LK_EXCLUSIVE;
 1300                 else
 1301                         ret = LK_EXCLOTHER;
 1302         } else if (x == LK_UNLOCKED)
 1303                 ret = 0;
 1304 
 1305         return (ret);
 1306 }
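
      /*
       * Illustrative sketch (not from the original kern_lock.c): lockstatus()
       * reports how the lock is held relative to curthread:
       *
       *	switch (lockstatus(&lk)) {
       *	case LK_EXCLUSIVE:	held exclusively by curthread (or
       *				disowned to LK_KERNPROC)
       *	case LK_EXCLOTHER:	held exclusively by another thread
       *	case LK_SHARED:		held in shared mode
       *	case 0:			unlocked at the time of the call
       *	}
       */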
 1307 
 1308 #ifdef INVARIANT_SUPPORT
 1309 
 1310 FEATURE(invariant_support,
 1311     "Support for modules compiled with INVARIANTS option");
 1312 
 1313 #ifndef INVARIANTS
 1314 #undef  _lockmgr_assert
 1315 #endif
 1316 
 1317 void
 1318 _lockmgr_assert(struct lock *lk, int what, const char *file, int line)
 1319 {
 1320         int slocked = 0;
 1321 
 1322         if (panicstr != NULL)
 1323                 return;
 1324         switch (what) {
 1325         case KA_SLOCKED:
 1326         case KA_SLOCKED | KA_NOTRECURSED:
 1327         case KA_SLOCKED | KA_RECURSED:
 1328                 slocked = 1;
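                      /* FALLTHROUGH */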
 1329         case KA_LOCKED:
 1330         case KA_LOCKED | KA_NOTRECURSED:
 1331         case KA_LOCKED | KA_RECURSED:
 1332 #ifdef WITNESS
 1333 
 1334                 /*
 1335                  * We cannot trust WITNESS if the lock is held in exclusive
 1336                  * mode and a call to lockmgr_disown() happened.
 1337                  * Work around this by skipping the check if the lock is
 1338                  * held in exclusive mode, even for the KA_LOCKED case.
 1339                  */
 1340                 if (slocked || (lk->lk_lock & LK_SHARE)) {
 1341                         witness_assert(&lk->lock_object, what, file, line);
 1342                         break;
 1343                 }
 1344 #endif
 1345                 if (lk->lk_lock == LK_UNLOCKED ||
 1346                     ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
 1347                     (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
 1348                         panic("Lock %s not %slocked @ %s:%d\n",
 1349                             lk->lock_object.lo_name, slocked ? "share" : "",
 1350                             file, line);
 1351 
 1352                 if ((lk->lk_lock & LK_SHARE) == 0) {
 1353                         if (lockmgr_recursed(lk)) {
 1354                                 if (what & KA_NOTRECURSED)
 1355                                         panic("Lock %s recursed @ %s:%d\n",
 1356                                             lk->lock_object.lo_name, file,
 1357                                             line);
 1358                         } else if (what & KA_RECURSED)
 1359                                 panic("Lock %s not recursed @ %s:%d\n",
 1360                                     lk->lock_object.lo_name, file, line);
 1361                 }
 1362                 break;
 1363         case KA_XLOCKED:
 1364         case KA_XLOCKED | KA_NOTRECURSED:
 1365         case KA_XLOCKED | KA_RECURSED:
 1366                 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
 1367                         panic("Lock %s not exclusively locked @ %s:%d\n",
 1368                             lk->lock_object.lo_name, file, line);
 1369                 if (lockmgr_recursed(lk)) {
 1370                         if (what & KA_NOTRECURSED)
 1371                                 panic("Lock %s recursed @ %s:%d\n",
 1372                                     lk->lock_object.lo_name, file, line);
 1373                 } else if (what & KA_RECURSED)
 1374                         panic("Lock %s not recursed @ %s:%d\n",
 1375                             lk->lock_object.lo_name, file, line);
 1376                 break;
 1377         case KA_UNLOCKED:
 1378                 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
 1379                         panic("Lock %s exclusively locked @ %s:%d\n",
 1380                             lk->lock_object.lo_name, file, line);
 1381                 break;
 1382         default:
 1383                 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
 1384                     line);
 1385         }
 1386 }
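
      /*
       * Illustrative sketch (not from the original kern_lock.c): kernels
       * built with INVARIANTS normally reach this function through the
       * lockmgr_assert() wrapper, which supplies file and line
       * automatically, e.g.:
       *
       *	lockmgr_assert(&lk, KA_XLOCKED);
       *	lockmgr_assert(&lk, KA_SLOCKED | KA_NOTRECURSED);
       */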
 1387 #endif
 1388 
 1389 #ifdef DDB
 1390 int
 1391 lockmgr_chain(struct thread *td, struct thread **ownerp)
 1392 {
 1393         struct lock *lk;
 1394 
 1395         lk = td->td_wchan;
 1396 
 1397         if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
 1398                 return (0);
 1399         db_printf("blocked on lockmgr %s ", lk->lock_object.lo_name);
 1400         if (lk->lk_lock & LK_SHARE)
 1401                 db_printf("SHARED (count %ju)\n",
 1402                     (uintmax_t)LK_SHARERS(lk->lk_lock));
 1403         else
 1404                 db_printf("EXCL\n");
 1405         *ownerp = lockmgr_xholder(lk);
 1406 
 1407         return (1);
 1408 }
 1409 
 1410 static void
 1411 db_show_lockmgr(struct lock_object *lock)
 1412 {
 1413         struct thread *td;
 1414         struct lock *lk;
 1415 
 1416         lk = (struct lock *)lock;
 1417 
 1418         db_printf(" state: ");
 1419         if (lk->lk_lock == LK_UNLOCKED)
 1420                 db_printf("UNLOCKED\n");
 1421         else if (lk->lk_lock & LK_SHARE)
 1422                 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
 1423         else {
 1424                 td = lockmgr_xholder(lk);
 1425                 if (td == (struct thread *)LK_KERNPROC)
 1426                         db_printf("XLOCK: LK_KERNPROC\n");
 1427                 else
 1428                         db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
 1429                             td->td_tid, td->td_proc->p_pid,
 1430                             td->td_proc->p_comm);
 1431                 if (lockmgr_recursed(lk))
 1432                         db_printf(" recursed: %d\n", lk->lk_recurse);
 1433         }
 1434         db_printf(" waiters: ");
 1435         switch (lk->lk_lock & LK_ALL_WAITERS) {
 1436         case LK_SHARED_WAITERS:
 1437                 db_printf("shared\n");
 1438                 break;
 1439         case LK_EXCLUSIVE_WAITERS:
 1440                 db_printf("exclusive\n");
 1441                 break;
 1442         case LK_ALL_WAITERS:
 1443                 db_printf("shared and exclusive\n");
 1444                 break;
 1445         default:
 1446                 db_printf("none\n");
 1447         }
 1448         db_printf(" spinners: ");
 1449         if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
 1450                 db_printf("exclusive\n");
 1451         else
 1452                 db_printf("none\n");
 1453 }
 1454 #endif
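
      /*
       * Illustrative note (not from the original kern_lock.c):
       * db_show_lockmgr() is reached through the lock class's lc_ddb_show
       * method, so a lockmgr lock can be inspected from the debugger
       * prompt with:
       *
       *	db> show lock <address>
       */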
