FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lock.c


    1 /*-
    2  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice(s), this list of conditions and the following disclaimer as
   10  *    the first lines of this file unmodified other than the possible
   11  *    addition of one or more copyright notices.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice(s), this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
   17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
   20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
   23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
   26  * DAMAGE.
   27  */
   28 
   29 #include "opt_adaptive_lockmgrs.h"
   30 #include "opt_ddb.h"
   31 #include "opt_kdtrace.h"
   32 
   33 #include <sys/cdefs.h>
   34 __FBSDID("$FreeBSD: releng/8.0/sys/kern/kern_lock.c 197981 2009-10-12 15:32:00Z attilio $");
   35 
   36 #include <sys/param.h>
   37 #include <sys/ktr.h>
   38 #include <sys/linker_set.h>
   39 #include <sys/lock.h>
   40 #include <sys/lock_profile.h>
   41 #include <sys/lockmgr.h>
   42 #include <sys/mutex.h>
   43 #include <sys/proc.h>
   44 #include <sys/sleepqueue.h>
   45 #ifdef DEBUG_LOCKS
   46 #include <sys/stack.h>
   47 #endif
   48 #include <sys/sysctl.h>
   49 #include <sys/systm.h>
   50 
   51 #include <machine/cpu.h>
   52 
   53 #ifdef DDB
   54 #include <ddb/ddb.h>
   55 #endif
   56 
   57 CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
   58     (LK_ADAPTIVE | LK_NOSHARE));
   59 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
   60     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
   61 
   62 #define SQ_EXCLUSIVE_QUEUE      0
   63 #define SQ_SHARED_QUEUE         1
   64 
   65 #ifdef ADAPTIVE_LOCKMGRS
   66 #define ALK_RETRIES             10
   67 #define ALK_LOOPS               10000
   68 #endif
   69 
   70 #ifndef INVARIANTS
   71 #define _lockmgr_assert(lk, what, file, line)
   72 #define TD_LOCKS_INC(td)
   73 #define TD_LOCKS_DEC(td)
   74 #else
   75 #define TD_LOCKS_INC(td)        ((td)->td_locks++)
   76 #define TD_LOCKS_DEC(td)        ((td)->td_locks--)
   77 #endif
   78 #define TD_SLOCKS_INC(td)       ((td)->td_lk_slocks++)
   79 #define TD_SLOCKS_DEC(td)       ((td)->td_lk_slocks--)
   80 
   81 #ifndef DEBUG_LOCKS
   82 #define STACK_PRINT(lk)
   83 #define STACK_SAVE(lk)
   84 #define STACK_ZERO(lk)
   85 #else
   86 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
   87 #define STACK_SAVE(lk)  stack_save(&(lk)->lk_stack)
   88 #define STACK_ZERO(lk)  stack_zero(&(lk)->lk_stack)
   89 #endif
   90 
   91 #define LOCK_LOG2(lk, string, arg1, arg2)                               \
   92         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
   93                 CTR2(KTR_LOCK, (string), (arg1), (arg2))
   94 #define LOCK_LOG3(lk, string, arg1, arg2, arg3)                         \
   95         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
   96                 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
   97 
   98 #define GIANT_DECLARE                                                   \
   99         int _i = 0;                                                     \
  100         WITNESS_SAVE_DECL(Giant)
  101 #define GIANT_RESTORE() do {                                            \
  102         if (_i > 0) {                                                   \
  103                 while (_i--)                                            \
  104                         mtx_lock(&Giant);                               \
  105                 WITNESS_RESTORE(&Giant.lock_object, Giant);             \
  106         }                                                               \
  107 } while (0)
  108 #define GIANT_SAVE() do {                                               \
  109         if (mtx_owned(&Giant)) {                                        \
  110                 WITNESS_SAVE(&Giant.lock_object, Giant);                \
  111                 while (mtx_owned(&Giant)) {                             \
  112                         _i++;                                           \
  113                         mtx_unlock(&Giant);                             \
  114                 }                                                       \
  115         }                                                               \
  116 } while (0)
  117 
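/*
 * Illustrative sketch, not part of this file: how the GIANT_* macros above
 * are meant to be paired.  Giant is fully released (however deeply it is
 * recursed) around a blocking point and reacquired to the same depth
 * afterwards, mirroring their use in sleeplk() below.  The function is
 * hypothetical and kept under #if 0.
 */
#if 0
static void
example_giant_drop(void)
{
        GIANT_DECLARE;          /* local recursion counter + saved WITNESS state */

        GIANT_SAVE();           /* drop Giant completely if curthread owns it */
        /* ... block or sleep without holding Giant ... */
        GIANT_RESTORE();        /* reacquire Giant to its former recursion depth */
}
#endif
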
  118 #define LK_CAN_SHARE(x)                                                 \
  119         (((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||      \
  120         ((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||                           \
  121         curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
  122 #define LK_TRYOP(x)                                                     \
  123         ((x) & LK_NOWAIT)
  124 
  125 #define LK_CAN_WITNESS(x)                                               \
  126         (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
  127 #define LK_TRYWIT(x)                                                    \
  128         (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
  129 
  130 #define LK_CAN_ADAPT(lk, f)                                             \
  131         (((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&             \
  132         ((f) & LK_SLEEPFAIL) == 0)
  133 
  134 #define lockmgr_disowned(lk)                                            \
  135         (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
  136 
  137 #define lockmgr_xlocked(lk)                                             \
  138         (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
  139 
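/*
 * Illustrative sketch, not part of this file: how the single lk_lock word is
 * decoded with the macros above and those from <sys/lockmgr.h>.  The helper
 * below is hypothetical and only mirrors what lockstatus() and
 * lockmgr_printinfo() do later in this file; it is kept under #if 0.
 */
#if 0
static const char *
lockmgr_state_name(struct lock *lk)
{
        uintptr_t x;

        x = lk->lk_lock;
        if (x == LK_UNLOCKED)
                return ("unlocked");
        if (x & LK_SHARE)
                /* Shared: LK_SHARERS(x) yields the number of holders. */
                return ("shared");
        if (LK_HOLDER(x) == LK_KERNPROC)
                /* Exclusive but disowned (see lockmgr_disowned()). */
                return ("exclusive (disowned)");
        /* Exclusive: LK_HOLDER(x) is the owning thread. */
        return ("exclusive");
}
#endif
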
  140 static void      assert_lockmgr(struct lock_object *lock, int how);
  141 #ifdef DDB
  142 static void      db_show_lockmgr(struct lock_object *lock);
  143 #endif
  144 static void      lock_lockmgr(struct lock_object *lock, int how);
  145 #ifdef KDTRACE_HOOKS
  146 static int       owner_lockmgr(struct lock_object *lock, struct thread **owner);
  147 #endif
  148 static int       unlock_lockmgr(struct lock_object *lock);
  149 
  150 struct lock_class lock_class_lockmgr = {
  151         .lc_name = "lockmgr",
  152         .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
  153         .lc_assert = assert_lockmgr,
  154 #ifdef DDB
  155         .lc_ddb_show = db_show_lockmgr,
  156 #endif
  157         .lc_lock = lock_lockmgr,
  158         .lc_unlock = unlock_lockmgr,
  159 #ifdef KDTRACE_HOOKS
  160         .lc_owner = owner_lockmgr,
  161 #endif
  162 };
  163 
  164 static __inline struct thread *
  165 lockmgr_xholder(struct lock *lk)
  166 {
  167         uintptr_t x;
  168 
  169         x = lk->lk_lock;
  170         return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
  171 }
  172 
  173 /*
  174  * Assumes the sleepqueue chain lock is held on entry and returns with it
  175  * released.  It also assumes that the interlock, if any, has already been
  176  * checked by the caller.  If LK_INTERLOCK is specified the interlock is not
  177  * reacquired after the sleep.
  178  */
  179 static __inline int
  180 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
  181     const char *wmesg, int pri, int timo, int queue)
  182 {
  183         GIANT_DECLARE;
  184         struct lock_class *class;
  185         int catch, error;
  186 
  187         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
  188         catch = pri & PCATCH;
  189         pri &= PRIMASK;
  190         error = 0;
  191 
  192         LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
  193             (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
  194 
  195         if (flags & LK_INTERLOCK)
  196                 class->lc_unlock(ilk);
  197         GIANT_SAVE();
  198         sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
  199             SLEEPQ_INTERRUPTIBLE : 0), queue);
  200         if ((flags & LK_TIMELOCK) && timo)
  201                 sleepq_set_timeout(&lk->lock_object, timo);
  202 
  203         /*
  204          * Choose the proper sleep call based on the timeout and PCATCH.
  205          */
  206         if ((flags & LK_TIMELOCK) && timo && catch)
  207                 error = sleepq_timedwait_sig(&lk->lock_object, pri);
  208         else if ((flags & LK_TIMELOCK) && timo)
  209                 error = sleepq_timedwait(&lk->lock_object, pri);
  210         else if (catch)
  211                 error = sleepq_wait_sig(&lk->lock_object, pri);
  212         else
  213                 sleepq_wait(&lk->lock_object, pri);
  214         GIANT_RESTORE();
  215         if ((flags & LK_SLEEPFAIL) && error == 0)
  216                 error = ENOLCK;
  217 
  218         return (error);
  219 }
  220 
  221 static __inline int
  222 wakeupshlk(struct lock *lk, const char *file, int line)
  223 {
  224         uintptr_t v, x;
  225         int queue, wakeup_swapper;
  226 
  227         TD_LOCKS_DEC(curthread);
  228         TD_SLOCKS_DEC(curthread);
  229         WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
  230         LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
  231 
  232         wakeup_swapper = 0;
  233         for (;;) {
  234                 x = lk->lk_lock;
  235 
  236                 /*
  237                  * If there is more than one shared lock held, just drop one
  238                  * and return.
  239                  */
  240                 if (LK_SHARERS(x) > 1) {
  241                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
  242                             x - LK_ONE_SHARER))
  243                                 break;
  244                         continue;
  245                 }
  246 
  247                 /*
  248                  * If there are no waiters on the exclusive queue, drop the
  249                  * lock quickly.
  250                  */
  251                 if ((x & LK_ALL_WAITERS) == 0) {
  252                         MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
  253                             LK_SHARERS_LOCK(1));
  254                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
  255                                 break;
  256                         continue;
  257                 }
  258 
  259                 /*
  260                  * We should have a sharer with waiters, so enter the hard
  261                  * path in order to handle wakeups correctly.
  262                  */
  263                 sleepq_lock(&lk->lock_object);
  264                 x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
  265                 v = LK_UNLOCKED;
  266 
  267                 /*
  268                  * If the lock has exclusive waiters, give them preference in
  269                  * order to avoid deadlock with shared runners up.
  270                  */
  271                 if (x & LK_EXCLUSIVE_WAITERS) {
  272                         queue = SQ_EXCLUSIVE_QUEUE;
  273                         v |= (x & LK_SHARED_WAITERS);
  274                 } else {
  275                         MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
  276                             LK_SHARED_WAITERS);
  277                         queue = SQ_SHARED_QUEUE;
  278                 }
  279 
  280                 if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
  281                     v)) {
  282                         sleepq_release(&lk->lock_object);
  283                         continue;
  284                 }
  285                 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
  286                     __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
  287                     "exclusive");
  288                 wakeup_swapper = sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
  289                     0, queue);
  290                 sleepq_release(&lk->lock_object);
  291                 break;
  292         }
  293 
  294         lock_profile_release_lock(&lk->lock_object);
  295         return (wakeup_swapper);
  296 }
  297 
  298 static void
  299 assert_lockmgr(struct lock_object *lock, int what)
  300 {
  301 
  302         panic("lockmgr locks do not support assertions");
  303 }
  304 
  305 static void
  306 lock_lockmgr(struct lock_object *lock, int how)
  307 {
  308 
  309         panic("lockmgr locks do not support sleep interlocking");
  310 }
  311 
  312 static int
  313 unlock_lockmgr(struct lock_object *lock)
  314 {
  315 
  316         panic("lockmgr locks do not support sleep interlocking");
  317 }
  318 
  319 #ifdef KDTRACE_HOOKS
  320 static int
  321 owner_lockmgr(struct lock_object *lock, struct thread **owner)
  322 {
  323 
  324         panic("lockmgr locks do not support owner inquiring");
  325 }
  326 #endif
  327 
  328 void
  329 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
  330 {
  331         int iflags;
  332 
  333         MPASS((flags & ~LK_INIT_MASK) == 0);
  334         ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
  335             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
  336             &lk->lk_lock));
  337 
  338         iflags = LO_SLEEPABLE | LO_UPGRADABLE;
  339         if (flags & LK_CANRECURSE)
  340                 iflags |= LO_RECURSABLE;
  341         if ((flags & LK_NODUP) == 0)
  342                 iflags |= LO_DUPOK;
  343         if (flags & LK_NOPROFILE)
  344                 iflags |= LO_NOPROFILE;
  345         if ((flags & LK_NOWITNESS) == 0)
  346                 iflags |= LO_WITNESS;
  347         if (flags & LK_QUIET)
  348                 iflags |= LO_QUIET;
  349         iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
  350 
  351         lk->lk_lock = LK_UNLOCKED;
  352         lk->lk_recurse = 0;
  353         lk->lk_timo = timo;
  354         lk->lk_pri = pri;
  355         lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
  356         STACK_ZERO(lk);
  357 }
  358 
  359 void
  360 lockdestroy(struct lock *lk)
  361 {
  362 
  363         KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
  364         KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
  365         lock_destroy(&lk->lock_object);
  366 }
  367 
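/*
 * Illustrative usage sketch, not part of this file: the typical life cycle
 * of a lockmgr lock through the lockmgr(9) KPI.  The consumer code below is
 * hypothetical (PVFS and the names are placeholders) and is kept under
 * #if 0; lockinit(), lockmgr() and lockdestroy() are the real entry points
 * declared in <sys/lockmgr.h>.
 */
#if 0
static struct lock example_lk;

static void
example_lockmgr_lifecycle(void)
{
        /* Sleep priority, wait message, timeout and LK_* init flags. */
        lockinit(&example_lk, PVFS, "exmplk", 0, LK_CANRECURSE);

        /* Exclusive acquire and release. */
        lockmgr(&example_lk, LK_EXCLUSIVE, NULL);
        lockmgr(&example_lk, LK_RELEASE, NULL);

        /* Shared acquire and release; LK_RELEASE works for both modes. */
        lockmgr(&example_lk, LK_SHARED, NULL);
        lockmgr(&example_lk, LK_RELEASE, NULL);

        /* The lock must be unlocked and unrecursed before destruction. */
        lockdestroy(&example_lk);
}
#endif
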
  368 int
  369 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
  370     const char *wmesg, int pri, int timo, const char *file, int line)
  371 {
  372         GIANT_DECLARE;
  373         struct lock_class *class;
  374         const char *iwmesg;
  375         uintptr_t tid, v, x;
  376         u_int op;
  377         int error, ipri, itimo, queue, wakeup_swapper;
  378 #ifdef LOCK_PROFILING
  379         uint64_t waittime = 0;
  380         int contested = 0;
  381 #endif
  382 #ifdef ADAPTIVE_LOCKMGRS
  383         volatile struct thread *owner;
  384         u_int i, spintries = 0;
  385 #endif
  386 
  387         error = 0;
  388         tid = (uintptr_t)curthread;
  389         op = (flags & LK_TYPE_MASK);
  390         iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
  391         ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
  392         itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
  393 
  394         MPASS((flags & ~LK_TOTAL_MASK) == 0);
  395         KASSERT((op & (op - 1)) == 0,
  396             ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
  397         KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
  398             (op != LK_DOWNGRADE && op != LK_RELEASE),
  399             ("%s: Invalid flags in regard of the operation desired @ %s:%d",
  400             __func__, file, line));
  401         KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
  402             ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
  403             __func__, file, line));
  404 
  405         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
  406         if (panicstr != NULL) {
  407                 if (flags & LK_INTERLOCK)
  408                         class->lc_unlock(ilk);
  409                 return (0);
  410         }
  411 
  412         if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
  413                 op = LK_EXCLUSIVE;
  414 
  415         wakeup_swapper = 0;
  416         switch (op) {
  417         case LK_SHARED:
  418                 if (LK_CAN_WITNESS(flags))
  419                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
  420                             file, line, ilk);
  421                 for (;;) {
  422                         x = lk->lk_lock;
  423 
  424                         /*
  425                          * If no other thread has an exclusive lock, or
  426                          * no exclusive waiter is present, bump the count of
  427                          * sharers.  Since we have to preserve the state of
  428                          * waiters, if we fail to acquire the shared lock
  429                          * loop back and retry.
  430                          */
  431                         if (LK_CAN_SHARE(x)) {
  432                                 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
  433                                     x + LK_ONE_SHARER))
  434                                         break;
  435                                 continue;
  436                         }
  437                         lock_profile_obtain_lock_failed(&lk->lock_object,
  438                             &contested, &waittime);
  439 
  440                         /*
  441                          * If the lock is already held by curthread in
  442                          * exclusive mode, avoid a deadlock.
  443                          */
  444                         if (LK_HOLDER(x) == tid) {
  445                                 LOCK_LOG2(lk,
  446                                     "%s: %p already held in exclusive mode",
  447                                     __func__, lk);
  448                                 error = EDEADLK;
  449                                 break;
  450                         }
  451 
  452                         /*
  453                          * If the operation is not allowed to sleep, just give up
  454                          * and return.
  455                          */
  456                         if (LK_TRYOP(flags)) {
  457                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
  458                                     __func__, lk);
  459                                 error = EBUSY;
  460                                 break;
  461                         }
  462 
  463 #ifdef ADAPTIVE_LOCKMGRS
  464                         /*
  465                          * If the owner is running on another CPU, spin until
  466                          * the owner stops running or the state of the lock
  467                          * changes.  We need a double-state handle here
  468                          * because for a failed acquisition the lock can be
  469                          * either held in exclusive mode or shared mode
  470                          * (for the writer starvation avoidance technique).
  471                          */
  472                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  473                             LK_HOLDER(x) != LK_KERNPROC) {
  474                                 owner = (struct thread *)LK_HOLDER(x);
  475                                 if (LOCK_LOG_TEST(&lk->lock_object, 0))
  476                                         CTR3(KTR_LOCK,
  477                                             "%s: spinning on %p held by %p",
  478                                             __func__, lk, owner);
  479 
  480                                 /*
  481                                  * If we are also holding an interlock, drop it
  482                                  * in order to avoid a deadlock if the lockmgr
  483                                  * owner is adaptively spinning on the
  484                                  * interlock itself.
  485                                  */
  486                                 if (flags & LK_INTERLOCK) {
  487                                         class->lc_unlock(ilk);
  488                                         flags &= ~LK_INTERLOCK;
  489                                 }
  490                                 GIANT_SAVE();
  491                                 while (LK_HOLDER(lk->lk_lock) ==
  492                                     (uintptr_t)owner && TD_IS_RUNNING(owner))
  493                                         cpu_spinwait();
  494                                 GIANT_RESTORE();
  495                                 continue;
  496                         } else if (LK_CAN_ADAPT(lk, flags) &&
  497                             (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
  498                             spintries < ALK_RETRIES) {
  499                                 if (flags & LK_INTERLOCK) {
  500                                         class->lc_unlock(ilk);
  501                                         flags &= ~LK_INTERLOCK;
  502                                 }
  503                                 GIANT_SAVE();
  504                                 spintries++;
  505                                 for (i = 0; i < ALK_LOOPS; i++) {
  506                                         if (LOCK_LOG_TEST(&lk->lock_object, 0))
  507                                                 CTR4(KTR_LOCK,
  508                                     "%s: shared spinning on %p with %u and %u",
  509                                                     __func__, lk, spintries, i);
  510                                         x = lk->lk_lock;
  511                                         if ((x & LK_SHARE) == 0 ||
  512                                             LK_CAN_SHARE(x) != 0)
  513                                                 break;
  514                                         cpu_spinwait();
  515                                 }
  516                                 GIANT_RESTORE();
  517                                 if (i != ALK_LOOPS)
  518                                         continue;
  519                         }
  520 #endif
  521 
  522                         /*
  523                          * Acquire the sleepqueue chain lock because we
  524                          * probably will need to manipulate the waiters flags.
  525                          */
  526                         sleepq_lock(&lk->lock_object);
  527                         x = lk->lk_lock;
  528 
  529                         /*
  530                          * If the lock can be acquired in shared mode, try
  531                          * again.
  532                          */
  533                         if (LK_CAN_SHARE(x)) {
  534                                 sleepq_release(&lk->lock_object);
  535                                 continue;
  536                         }
  537 
  538 #ifdef ADAPTIVE_LOCKMGRS
  539                         /*
  540                          * The current lock owner might have started executing
  541                          * on another CPU (or the lock could have changed
  542                          * owner) while we were waiting on the sleepqueue
  543                          * chain lock.  If so, drop the sleepqueue lock and try
  544                          * again.
  545                          */
  546                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  547                             LK_HOLDER(x) != LK_KERNPROC) {
  548                                 owner = (struct thread *)LK_HOLDER(x);
  549                                 if (TD_IS_RUNNING(owner)) {
  550                                         sleepq_release(&lk->lock_object);
  551                                         continue;
  552                                 }
  553                         }
  554 #endif
  555 
  556                         /*
  557                          * Try to set the LK_SHARED_WAITERS flag.  If we fail,
  558                          * loop back and retry.
  559                          */
  560                         if ((x & LK_SHARED_WAITERS) == 0) {
  561                                 if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
  562                                     x | LK_SHARED_WAITERS)) {
  563                                         sleepq_release(&lk->lock_object);
  564                                         continue;
  565                                 }
  566                                 LOCK_LOG2(lk, "%s: %p set shared waiters flag",
  567                                     __func__, lk);
  568                         }
  569 
  570                         /*
  571                          * Since we have been unable to acquire the
  572                          * shared lock and the shared waiters flag is set,
  573                          * we will sleep.
  574                          */
  575                         error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
  576                             SQ_SHARED_QUEUE);
  577                         flags &= ~LK_INTERLOCK;
  578                         if (error) {
  579                                 LOCK_LOG3(lk,
  580                                     "%s: interrupted sleep for %p with %d",
  581                                     __func__, lk, error);
  582                                 break;
  583                         }
  584                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
  585                             __func__, lk);
  586                 }
  587                 if (error == 0) {
  588                         lock_profile_obtain_lock_success(&lk->lock_object,
  589                             contested, waittime, file, line);
  590                         LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
  591                             line);
  592                         WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
  593                             line);
  594                         TD_LOCKS_INC(curthread);
  595                         TD_SLOCKS_INC(curthread);
  596                         STACK_SAVE(lk);
  597                 }
  598                 break;
  599         case LK_UPGRADE:
  600                 _lockmgr_assert(lk, KA_SLOCKED, file, line);
  601                 v = lk->lk_lock;
  602                 x = v & LK_ALL_WAITERS;
  603                 v &= LK_EXCLUSIVE_SPINNERS;
  604 
  605                 /*
  606                  * Try to switch from one shared lock to an exclusive one.
  607                  * We need to preserve waiters flags during the operation.
  608                  */
  609                 if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
  610                     tid | x)) {
  611                         LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
  612                             line);
  613                         WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
  614                             LK_TRYWIT(flags), file, line);
  615                         TD_SLOCKS_DEC(curthread);
  616                         break;
  617                 }
  618 
  619                 /*
  620                  * We have been unable to succeed in upgrading, so just
  621                  * give up the shared lock.
  622                  */
  623                 wakeup_swapper |= wakeupshlk(lk, file, line);
  624 
  625                 /* FALLTHROUGH */
  626         case LK_EXCLUSIVE:
  627                 if (LK_CAN_WITNESS(flags))
  628                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
  629                             LOP_EXCLUSIVE, file, line, ilk);
  630 
  631                 /*
  632                  * If curthread already holds the lock and this one is
  633                  * allowed to recurse, simply recurse on it.
  634                  */
  635                 if (lockmgr_xlocked(lk)) {
  636                         if ((flags & LK_CANRECURSE) == 0 &&
  637                             (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
  638 
  639                                 /*
  640                                  * For a try operation, just fail with EBUSY
  641                                  * instead of panicking.
  642                                  */
  643                                 if (LK_TRYOP(flags)) {
  644                                         LOCK_LOG2(lk,
  645                                             "%s: %p fails the try operation",
  646                                             __func__, lk);
  647                                         error = EBUSY;
  648                                         break;
  649                                 }
  650                                 if (flags & LK_INTERLOCK)
  651                                         class->lc_unlock(ilk);
  652                 panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
  653                                     __func__, iwmesg, file, line);
  654                         }
  655                         lk->lk_recurse++;
  656                         LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
  657                         LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
  658                             lk->lk_recurse, file, line);
  659                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
  660                             LK_TRYWIT(flags), file, line);
  661                         TD_LOCKS_INC(curthread);
  662                         break;
  663                 }
  664 
  665                 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
  666                     tid)) {
  667                         lock_profile_obtain_lock_failed(&lk->lock_object,
  668                             &contested, &waittime);
  669 
  670                         /*
  671                          * If the operation is not allowed to sleep, just give up
  672                          * and return.
  673                          */
  674                         if (LK_TRYOP(flags)) {
  675                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
  676                                     __func__, lk);
  677                                 error = EBUSY;
  678                                 break;
  679                         }
  680 
  681 #ifdef ADAPTIVE_LOCKMGRS
  682                         /*
  683                          * If the owner is running on another CPU, spin until
  684                          * the owner stops running or the state of the lock
  685                          * changes.
  686                          */
  687                         x = lk->lk_lock;
  688                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  689                             LK_HOLDER(x) != LK_KERNPROC) {
  690                                 owner = (struct thread *)LK_HOLDER(x);
  691                                 if (LOCK_LOG_TEST(&lk->lock_object, 0))
  692                                         CTR3(KTR_LOCK,
  693                                             "%s: spinning on %p held by %p",
  694                                             __func__, lk, owner);
  695 
  696                                 /*
  697                                  * If we are also holding an interlock, drop it
  698                                  * in order to avoid a deadlock if the lockmgr
  699                                  * owner is adaptively spinning on the
  700                                  * interlock itself.
  701                                  */
  702                                 if (flags & LK_INTERLOCK) {
  703                                         class->lc_unlock(ilk);
  704                                         flags &= ~LK_INTERLOCK;
  705                                 }
  706                                 GIANT_SAVE();
  707                                 while (LK_HOLDER(lk->lk_lock) ==
  708                                     (uintptr_t)owner && TD_IS_RUNNING(owner))
  709                                         cpu_spinwait();
  710                                 GIANT_RESTORE();
  711                                 continue;
  712                         } else if (LK_CAN_ADAPT(lk, flags) &&
  713                             (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
  714                             spintries < ALK_RETRIES) {
  715                                 if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
  716                                     !atomic_cmpset_ptr(&lk->lk_lock, x,
  717                                     x | LK_EXCLUSIVE_SPINNERS))
  718                                         continue;
  719                                 if (flags & LK_INTERLOCK) {
  720                                         class->lc_unlock(ilk);
  721                                         flags &= ~LK_INTERLOCK;
  722                                 }
  723                                 GIANT_SAVE();
  724                                 spintries++;
  725                                 for (i = 0; i < ALK_LOOPS; i++) {
  726                                         if (LOCK_LOG_TEST(&lk->lock_object, 0))
  727                                                 CTR4(KTR_LOCK,
  728                                     "%s: shared spinning on %p with %u and %u",
  729                                                     __func__, lk, spintries, i);
  730                                         if ((lk->lk_lock &
  731                                             LK_EXCLUSIVE_SPINNERS) == 0)
  732                                                 break;
  733                                         cpu_spinwait();
  734                                 }
  735                                 GIANT_RESTORE();
  736                                 if (i != ALK_LOOPS)
  737                                         continue;
  738                         }
  739 #endif
  740 
  741                         /*
  742                          * Acquire the sleepqueue chain lock because we
  743                          * probably will need to manipulate the waiters flags.
  744                          */
  745                         sleepq_lock(&lk->lock_object);
  746                         x = lk->lk_lock;
  747 
  748                         /*
  749                          * If the lock has been released while we spun on
  750                          * the sleepqueue chain lock, just try again.
  751                          */
  752                         if (x == LK_UNLOCKED) {
  753                                 sleepq_release(&lk->lock_object);
  754                                 continue;
  755                         }
  756 
  757 #ifdef ADAPTIVE_LOCKMGRS
  758                         /*
  759                          * The current lock owner might have started executing
  760                          * on another CPU (or the lock could have changed
  761                          * owner) while we were waiting on the sleepqueue
  762                          * chain lock.  If so, drop the sleepqueue lock and try
  763                          * again.
  764                          */
  765                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
  766                             LK_HOLDER(x) != LK_KERNPROC) {
  767                                 owner = (struct thread *)LK_HOLDER(x);
  768                                 if (TD_IS_RUNNING(owner)) {
  769                                         sleepq_release(&lk->lock_object);
  770                                         continue;
  771                                 }
  772                         }
  773 #endif
  774 
  775                         /*
  776                          * The lock can be in the state where there is a
  777                          * pending queue of waiters, but still no owner.
  778                          * This happens when the lock is contested and an
  779                          * owner is going to claim the lock.
  780                          * If curthread is the one that successfully acquires
  781                          * it, claim lock ownership and return, preserving the
  782                          * waiters flags.
  783                          */
  784                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
  785                         if ((x & ~v) == LK_UNLOCKED) {
  786                                 v &= ~LK_EXCLUSIVE_SPINNERS;
  787                                 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
  788                                     tid | v)) {
  789                                         sleepq_release(&lk->lock_object);
  790                                         LOCK_LOG2(lk,
  791                                             "%s: %p claimed by a new writer",
  792                                             __func__, lk);
  793                                         break;
  794                                 }
  795                                 sleepq_release(&lk->lock_object);
  796                                 continue;
  797                         }
  798 
  799                         /*
  800                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
  801                          * fail, loop back and retry.
  802                          */
  803                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
  804                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
  805                                     x | LK_EXCLUSIVE_WAITERS)) {
  806                                         sleepq_release(&lk->lock_object);
  807                                         continue;
  808                                 }
  809                                 LOCK_LOG2(lk, "%s: %p set excl waiters flag",
  810                                     __func__, lk);
  811                         }
  812 
  813                         /*
  814                          * Since we have been unable to acquire the
  815                          * exclusive lock and the exclusive waiters flag
  816                          * is set, we will sleep.
  817                          */
  818                         error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
  819                             SQ_EXCLUSIVE_QUEUE);
  820                         flags &= ~LK_INTERLOCK;
  821                         if (error) {
  822                                 LOCK_LOG3(lk,
  823                                     "%s: interrupted sleep for %p with %d",
  824                                     __func__, lk, error);
  825                                 break;
  826                         }
  827                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
  828                             __func__, lk);
  829                 }
  830                 if (error == 0) {
  831                         lock_profile_obtain_lock_success(&lk->lock_object,
  832                             contested, waittime, file, line);
  833                         LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
  834                             lk->lk_recurse, file, line);
  835                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
  836                             LK_TRYWIT(flags), file, line);
  837                         TD_LOCKS_INC(curthread);
  838                         STACK_SAVE(lk);
  839                 }
  840                 break;
  841         case LK_DOWNGRADE:
  842                 _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
  843                 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
  844                 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
  845                 TD_SLOCKS_INC(curthread);
  846 
  847                 /*
  848                  * In order to preserve waiters flags, just spin.
  849                  */
  850                 for (;;) {
  851                         x = lk->lk_lock;
  852                         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
  853                         x &= LK_ALL_WAITERS;
  854                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
  855                             LK_SHARERS_LOCK(1) | x))
  856                                 break;
  857                         cpu_spinwait();
  858                 }
  859                 break;
  860         case LK_RELEASE:
  861                 _lockmgr_assert(lk, KA_LOCKED, file, line);
  862                 x = lk->lk_lock;
  863 
  864                 if ((x & LK_SHARE) == 0) {
  865 
  866                         /*
  867                          * As a first option, treat the lock as if it has no
  868                          * waiters.
  869                          * Fix up the tid variable if the lock has been disowned.
  870                          */
  871                         if (LK_HOLDER(x) == LK_KERNPROC)
  872                                 tid = LK_KERNPROC;
  873                         else {
  874                                 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
  875                                     file, line);
  876                                 TD_LOCKS_DEC(curthread);
  877                         }
  878                         LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
  879                             lk->lk_recurse, file, line);
  880 
  881                         /*
  882                          * The lock is held in exclusive mode.
  883                          * If the lock is also recursed, then unrecurse it.
  884                          */
  885                         if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
  886                                 LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
  887                                     lk);
  888                                 lk->lk_recurse--;
  889                                 break;
  890                         }
  891                         if (tid != LK_KERNPROC)
  892                                 lock_profile_release_lock(&lk->lock_object);
  893 
  894                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
  895                             LK_UNLOCKED))
  896                                 break;
  897 
  898                         sleepq_lock(&lk->lock_object);
  899                         x = lk->lk_lock;
  900                         v = LK_UNLOCKED;
  901 
  902                         /*
  903                          * If the lock has exclusive waiters, give them
  904                          * preference in order to avoid deadlock with
  905                          * shared runners up.
  906                          */
  907                         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
  908                         if (x & LK_EXCLUSIVE_WAITERS) {
  909                                 queue = SQ_EXCLUSIVE_QUEUE;
  910                                 v |= (x & LK_SHARED_WAITERS);
  911                         } else {
  912                                 MPASS((x & LK_ALL_WAITERS) ==
  913                                     LK_SHARED_WAITERS);
  914                                 queue = SQ_SHARED_QUEUE;
  915                         }
  916 
  917                         LOCK_LOG3(lk,
  918                             "%s: %p waking up threads on the %s queue",
  919                             __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
  920                             "exclusive");
  921                         atomic_store_rel_ptr(&lk->lk_lock, v);
  922                         wakeup_swapper = sleepq_broadcast(&lk->lock_object,
  923                             SLEEPQ_LK, 0, queue);
  924                         sleepq_release(&lk->lock_object);
  925                         break;
  926                 } else
  927                         wakeup_swapper = wakeupshlk(lk, file, line);
  928                 break;
  929         case LK_DRAIN:
  930                 if (LK_CAN_WITNESS(flags))
  931                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
  932                             LOP_EXCLUSIVE, file, line, ilk);
  933 
  934                 /*
  935                  * Trying to drain a lock we already own will result in a
  936                  * deadlock.
  937                  */
  938                 if (lockmgr_xlocked(lk)) {
  939                         if (flags & LK_INTERLOCK)
  940                                 class->lc_unlock(ilk);
  941                         panic("%s: draining %s with the lock held @ %s:%d\n",
  942                             __func__, iwmesg, file, line);
  943                 }
  944 
  945                 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
  946                         lock_profile_obtain_lock_failed(&lk->lock_object,
  947                             &contested, &waittime);
  948 
  949                         /*
  950                          * If the operation is not allowed to sleep, just give up
  951                          * and return.
  952                          */
  953                         if (LK_TRYOP(flags)) {
  954                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
  955                                     __func__, lk);
  956                                 error = EBUSY;
  957                                 break;
  958                         }
  959 
  960                         /*
  961                          * Acquire the sleepqueue chain lock because we
  962                          * probably will need to manipulate the waiters flags.
  963                          */
  964                         sleepq_lock(&lk->lock_object);
  965                         x = lk->lk_lock;
  966 
  967                         /*
  968                          * If the lock has been released while we spun on
  969                          * the sleepqueue chain lock, just try again.
  970                          */
  971                         if (x == LK_UNLOCKED) {
  972                                 sleepq_release(&lk->lock_object);
  973                                 continue;
  974                         }
  975 
  976                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
  977                         if ((x & ~v) == LK_UNLOCKED) {
  978                                 v = (x & ~LK_EXCLUSIVE_SPINNERS);
  979                                 if (v & LK_EXCLUSIVE_WAITERS) {
  980                                         queue = SQ_EXCLUSIVE_QUEUE;
  981                                         v &= ~LK_EXCLUSIVE_WAITERS;
  982                                 } else {
  983                                         MPASS(v & LK_SHARED_WAITERS);
  984                                         queue = SQ_SHARED_QUEUE;
  985                                         v &= ~LK_SHARED_WAITERS;
  986                                 }
  987                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
  988                                         sleepq_release(&lk->lock_object);
  989                                         continue;
  990                                 }
  991                                 LOCK_LOG3(lk,
  992                                 "%s: %p waking up all threads on the %s queue",
  993                                     __func__, lk, queue == SQ_SHARED_QUEUE ?
  994                                     "shared" : "exclusive");
  995                                 wakeup_swapper |= sleepq_broadcast(
  996                                     &lk->lock_object, SLEEPQ_LK, 0, queue);
  997 
  998                                 /*
   999                                  * If shared waiters have been woken up, we need
  1000                                  * to wait for one of them to acquire the lock
  1001                                  * before setting the exclusive waiters flag in
  1002                                  * order to avoid a deadlock.
 1003                                  */
 1004                                 if (queue == SQ_SHARED_QUEUE) {
 1005                                         for (v = lk->lk_lock;
 1006                                             (v & LK_SHARE) && !LK_SHARERS(v);
 1007                                             v = lk->lk_lock)
 1008                                                 cpu_spinwait();
 1009                                 }
 1010                         }
 1011 
 1012                         /*
 1013                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
 1014                          * fail, loop back and retry.
 1015                          */
 1016                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
 1017                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
 1018                                     x | LK_EXCLUSIVE_WAITERS)) {
 1019                                         sleepq_release(&lk->lock_object);
 1020                                         continue;
 1021                                 }
 1022                                 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
 1023                                     __func__, lk);
 1024                         }
 1025 
 1026                         /*
 1027                          * Since we have been unable to acquire the
 1028                          * exclusive lock and the exclusive waiters flag
 1029                          * is set, we will sleep.
 1030                          */
 1031                         if (flags & LK_INTERLOCK) {
 1032                                 class->lc_unlock(ilk);
 1033                                 flags &= ~LK_INTERLOCK;
 1034                         }
 1035                         GIANT_SAVE();
 1036                         sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
 1037                             SQ_EXCLUSIVE_QUEUE);
 1038                         sleepq_wait(&lk->lock_object, ipri & PRIMASK);
 1039                         GIANT_RESTORE();
 1040                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
 1041                             __func__, lk);
 1042                 }
 1043 
 1044                 if (error == 0) {
 1045                         lock_profile_obtain_lock_success(&lk->lock_object,
 1046                             contested, waittime, file, line);
 1047                         LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
 1048                             lk->lk_recurse, file, line);
 1049                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
 1050                             LK_TRYWIT(flags), file, line);
 1051                         TD_LOCKS_INC(curthread);
 1052                         STACK_SAVE(lk);
 1053                 }
 1054                 break;
 1055         default:
 1056                 if (flags & LK_INTERLOCK)
 1057                         class->lc_unlock(ilk);
 1058                 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
 1059         }
 1060 
 1061         if (flags & LK_INTERLOCK)
 1062                 class->lc_unlock(ilk);
 1063         if (wakeup_swapper)
 1064                 kick_proc0();
 1065 
 1066         return (error);
 1067 }
 1068 
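/*
 * Illustrative sketch, not part of this file: how a caller typically pairs a
 * mutex interlock with LK_INTERLOCK.  The interlock is held across the
 * caller's state check and is dropped by __lockmgr_args() itself, closing
 * the race between the check and the sleep; it is not reacquired on return.
 * The mutex and the surrounding code are hypothetical and kept under #if 0.
 */
#if 0
static struct mtx example_interlock;   /* assumed initialized with mtx_init() */
static struct lock example_lk;         /* assumed initialized with lockinit() */

static void
example_interlocked_acquire(void)
{
        mtx_lock(&example_interlock);
        /* ... examine state protected by example_interlock ... */
        lockmgr(&example_lk, LK_EXCLUSIVE | LK_INTERLOCK, &example_interlock);
        /* example_interlock is no longer held here. */
        lockmgr(&example_lk, LK_RELEASE, NULL);
}
#endif
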
 1069 void
 1070 _lockmgr_disown(struct lock *lk, const char *file, int line)
 1071 {
 1072         uintptr_t tid, x;
 1073 
 1074         tid = (uintptr_t)curthread;
 1075         _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
 1076 
 1077         /*
 1078          * If the owner is already LK_KERNPROC just skip the whole operation.
 1079          */
 1080         if (LK_HOLDER(lk->lk_lock) != tid)
 1081                 return;
 1082         lock_profile_release_lock(&lk->lock_object);
 1083         LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
 1084         WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
 1085         TD_LOCKS_DEC(curthread);
 1086 
 1087         /*
 1088          * In order to preserve waiters flags, just spin.
 1089          */
 1090         for (;;) {
 1091                 x = lk->lk_lock;
 1092                 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
 1093                 x &= LK_ALL_WAITERS;
 1094                 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
 1095                     LK_KERNPROC | x))
 1096                         return;
 1097                 cpu_spinwait();
 1098         }
 1099 }
 1100 
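/*
 * Illustrative sketch, not part of this file: the disown pattern implemented
 * above.  A thread acquires the lock exclusively, hands ownership to
 * LK_KERNPROC via the lockmgr_disown() wrapper from <sys/lockmgr.h>, and a
 * different context releases it later (the buffer cache uses this for
 * buffers locked across asynchronous I/O).  The functions below are
 * hypothetical and kept under #if 0.
 */
#if 0
static void
example_start_io(struct lock *lk)
{
        lockmgr(lk, LK_EXCLUSIVE, NULL);
        /* ... hand the locked object to another context ... */
        lockmgr_disown(lk);             /* owner becomes LK_KERNPROC */
}

static void
example_io_done(struct lock *lk)
{
        /* Any thread may release a disowned lock. */
        lockmgr(lk, LK_RELEASE, NULL);
}
#endif
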
 1101 void
 1102 lockmgr_printinfo(struct lock *lk)
 1103 {
 1104         struct thread *td;
 1105         uintptr_t x;
 1106 
 1107         if (lk->lk_lock == LK_UNLOCKED)
 1108                 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
 1109         else if (lk->lk_lock & LK_SHARE)
 1110                 printf("lock type %s: SHARED (count %ju)\n",
 1111                     lk->lock_object.lo_name,
 1112                     (uintmax_t)LK_SHARERS(lk->lk_lock));
 1113         else {
 1114                 td = lockmgr_xholder(lk);
 1115                 printf("lock type %s: EXCL by thread %p (pid %d)\n",
 1116                     lk->lock_object.lo_name, td, td->td_proc->p_pid);
 1117         }
 1118 
 1119         x = lk->lk_lock;
 1120         if (x & LK_EXCLUSIVE_WAITERS)
 1121                 printf(" with exclusive waiters pending\n");
 1122         if (x & LK_SHARED_WAITERS)
 1123                 printf(" with shared waiters pending\n");
 1124         if (x & LK_EXCLUSIVE_SPINNERS)
 1125                 printf(" with exclusive spinners pending\n");
 1126 
 1127         STACK_PRINT(lk);
 1128 }
 1129 
 1130 int
 1131 lockstatus(struct lock *lk)
 1132 {
 1133         uintptr_t v, x;
 1134         int ret;
 1135 
 1136         ret = LK_SHARED;
 1137         x = lk->lk_lock;
 1138         v = LK_HOLDER(x);
 1139 
 1140         if ((x & LK_SHARE) == 0) {
 1141                 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
 1142                         ret = LK_EXCLUSIVE;
 1143                 else
 1144                         ret = LK_EXCLOTHER;
 1145         } else if (x == LK_UNLOCKED)
 1146                 ret = 0;
 1147 
 1148         return (ret);
 1149 }
 1150 
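/*
 * Illustrative sketch, not part of this file: interpreting the return value
 * of lockstatus() above.  The caller code is hypothetical and kept under
 * #if 0.
 */
#if 0
static void
example_check_status(struct lock *lk)
{
        switch (lockstatus(lk)) {
        case LK_EXCLUSIVE:      /* exclusively held by curthread or disowned */
                break;
        case LK_EXCLOTHER:      /* exclusively held by another thread */
                break;
        case LK_SHARED:         /* held in shared mode */
                break;
        case 0:                 /* not held at all */
                break;
        }
}
#endif
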
 1151 #ifdef INVARIANT_SUPPORT
 1152 #ifndef INVARIANTS
 1153 #undef  _lockmgr_assert
 1154 #endif
 1155 
 1156 void
 1157 _lockmgr_assert(struct lock *lk, int what, const char *file, int line)
 1158 {
 1159         int slocked = 0;
 1160 
 1161         if (panicstr != NULL)
 1162                 return;
 1163         switch (what) {
 1164         case KA_SLOCKED:
 1165         case KA_SLOCKED | KA_NOTRECURSED:
 1166         case KA_SLOCKED | KA_RECURSED:
 1167                 slocked = 1;
 1168         case KA_LOCKED:
 1169         case KA_LOCKED | KA_NOTRECURSED:
 1170         case KA_LOCKED | KA_RECURSED:
 1171 #ifdef WITNESS
 1172 
 1173                 /*
 1174                  * We cannot trust WITNESS if the lock is held in exclusive
 1175                  * mode and a call to lockmgr_disown() happened.
 1176                  * Work around this by skipping the check if the lock is held
 1177                  * in exclusive mode, even for the KA_LOCKED case.
 1178                  */
 1179                 if (slocked || (lk->lk_lock & LK_SHARE)) {
 1180                         witness_assert(&lk->lock_object, what, file, line);
 1181                         break;
 1182                 }
 1183 #endif
 1184                 if (lk->lk_lock == LK_UNLOCKED ||
 1185                     ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
 1186                     (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
 1187                         panic("Lock %s not %slocked @ %s:%d\n",
 1188                             lk->lock_object.lo_name, slocked ? "share" : "",
 1189                             file, line);
 1190 
 1191                 if ((lk->lk_lock & LK_SHARE) == 0) {
 1192                         if (lockmgr_recursed(lk)) {
 1193                                 if (what & KA_NOTRECURSED)
 1194                                         panic("Lock %s recursed @ %s:%d\n",
 1195                                             lk->lock_object.lo_name, file,
 1196                                             line);
 1197                         } else if (what & KA_RECURSED)
 1198                                 panic("Lock %s not recursed @ %s:%d\n",
 1199                                     lk->lock_object.lo_name, file, line);
 1200                 }
 1201                 break;
 1202         case KA_XLOCKED:
 1203         case KA_XLOCKED | KA_NOTRECURSED:
 1204         case KA_XLOCKED | KA_RECURSED:
 1205                 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
 1206                         panic("Lock %s not exclusively locked @ %s:%d\n",
 1207                             lk->lock_object.lo_name, file, line);
 1208                 if (lockmgr_recursed(lk)) {
 1209                         if (what & KA_NOTRECURSED)
 1210                                 panic("Lock %s recursed @ %s:%d\n",
 1211                                     lk->lock_object.lo_name, file, line);
 1212                 } else if (what & KA_RECURSED)
 1213                         panic("Lock %s not recursed @ %s:%d\n",
 1214                             lk->lock_object.lo_name, file, line);
 1215                 break;
 1216         case KA_UNLOCKED:
 1217                 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
 1218                         panic("Lock %s exclusively locked @ %s:%d\n",
 1219                             lk->lock_object.lo_name, file, line);
 1220                 break;
 1221         default:
 1222                 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
 1223                     line);
 1224         }
 1225 }
 1226 #endif
 1227 
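/*
 * Illustrative sketch, not part of this file: consumers normally reach the
 * assertion routine above through the lockmgr_assert() wrapper macro,
 * assumed here to come from <sys/lockmgr.h>.  The call sites below are
 * hypothetical and kept under #if 0.
 */
#if 0
static void
example_assertions(struct lock *lk)
{
        lockmgr_assert(lk, KA_XLOCKED);         /* exclusively held */
        lockmgr_assert(lk, KA_LOCKED);          /* held shared or exclusive */
        lockmgr_assert(lk, KA_UNLOCKED);        /* not exclusively held */
}
#endif
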
 1228 #ifdef DDB
 1229 int
 1230 lockmgr_chain(struct thread *td, struct thread **ownerp)
 1231 {
 1232         struct lock *lk;
 1233 
 1234         lk = td->td_wchan;
 1235 
 1236         if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
 1237                 return (0);
 1238         db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
 1239         if (lk->lk_lock & LK_SHARE)
 1240                 db_printf("SHARED (count %ju)\n",
 1241                     (uintmax_t)LK_SHARERS(lk->lk_lock));
 1242         else
 1243                 db_printf("EXCL\n");
 1244         *ownerp = lockmgr_xholder(lk);
 1245 
 1246         return (1);
 1247 }
 1248 
 1249 static void
 1250 db_show_lockmgr(struct lock_object *lock)
 1251 {
 1252         struct thread *td;
 1253         struct lock *lk;
 1254 
 1255         lk = (struct lock *)lock;
 1256 
 1257         db_printf(" state: ");
 1258         if (lk->lk_lock == LK_UNLOCKED)
 1259                 db_printf("UNLOCKED\n");
 1260         else if (lk->lk_lock & LK_SHARE)
 1261                 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
 1262         else {
 1263                 td = lockmgr_xholder(lk);
 1264                 if (td == (struct thread *)LK_KERNPROC)
 1265                         db_printf("XLOCK: LK_KERNPROC\n");
 1266                 else
 1267                         db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
 1268                             td->td_tid, td->td_proc->p_pid,
 1269                             td->td_proc->p_comm);
 1270                 if (lockmgr_recursed(lk))
 1271                         db_printf(" recursed: %d\n", lk->lk_recurse);
 1272         }
 1273         db_printf(" waiters: ");
 1274         switch (lk->lk_lock & LK_ALL_WAITERS) {
 1275         case LK_SHARED_WAITERS:
 1276                 db_printf("shared\n");
 1277                 break;
 1278         case LK_EXCLUSIVE_WAITERS:
 1279                 db_printf("exclusive\n");
 1280                 break;
 1281         case LK_ALL_WAITERS:
 1282                 db_printf("shared and exclusive\n");
 1283                 break;
 1284         default:
 1285                 db_printf("none\n");
 1286         }
 1287         db_printf(" spinners: ");
 1288         if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
 1289                 db_printf("exclusive\n");
 1290         else
 1291                 db_printf("none\n");
 1292 }
 1293 #endif

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.