FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_rwlock.c


/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.1/sys/kern/kern_rwlock.c 236238 2012-05-29 14:50:21Z fabient $");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define ADAPTIVE_RWLOCKS
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

#ifdef ADAPTIVE_RWLOCKS
#define ROWNER_RETRIES  10
#define ROWNER_LOOPS    10000
#endif

#ifdef DDB
#include <ddb/ddb.h>

static void     db_show_rwlock(struct lock_object *lock);
#endif
static void     assert_rw(struct lock_object *lock, int what);
static void     lock_rw(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int      owner_rw(struct lock_object *lock, struct thread **owner);
#endif
static int      unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
        .lc_name = "rw",
        .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
        .lc_assert = assert_rw,
#ifdef DDB
        .lc_ddb_show = db_show_rwlock,
#endif
        .lc_lock = lock_rw,
        .lc_unlock = unlock_rw,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_rw,
#endif
};

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define rw_wowner(rw)                                                   \
        ((rw)->rw_lock & RW_LOCK_READ ? NULL :                          \
            (struct thread *)RW_OWNER((rw)->rw_lock))

/*
 * Return non-zero if the write owner is recursed.  Write ownership is not
 * assured here and should be checked beforehand.
 */
#define rw_recursed(rw)         ((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define rw_wlocked(rw)          (rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the owning thread for this lock that should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define rw_owner(rw)            rw_wowner(rw)
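
/*
 * Illustrative note (not part of the original file): the macros above rely
 * on the entire lock state living in the single word rw_lock.  A minimal
 * sketch of how that word could be decoded, assuming only the RW_* macros
 * from <sys/rwlock.h> that are already used in this file:
 *
 *      static void
 *      rw_decode(uintptr_t v)
 *      {
 *              if (v & RW_LOCK_READ)
 *                      printf("read-locked, %ju readers\n",
 *                          (uintmax_t)RW_READERS(v));
 *              else
 *                      printf("write-locked by thread %p\n",
 *                          (void *)RW_OWNER(v));
 *      }
 *
 * An unlocked rwlock (RW_UNLOCKED) is encoded as a read lock with zero
 * readers, which is why rw_wowner() returns NULL in that case.
 */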

#ifndef INVARIANTS
#define _rw_assert(rw, what, file, line)
#endif

void
assert_rw(struct lock_object *lock, int what)
{

        rw_assert((struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, int how)
{
        struct rwlock *rw;

        rw = (struct rwlock *)lock;
        if (how)
                rw_wlock(rw);
        else
                rw_rlock(rw);
}

int
unlock_rw(struct lock_object *lock)
{
        struct rwlock *rw;

        rw = (struct rwlock *)lock;
        rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
        if (rw->rw_lock & RW_LOCK_READ) {
                rw_runlock(rw);
                return (0);
        } else {
                rw_wunlock(rw);
                return (1);
        }
}
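
/*
 * Illustrative note (not part of the original file): lock_rw() and
 * unlock_rw() let generic code drop and later reacquire an rwlock through
 * its struct lock_class without knowing how it is held.  A hedged sketch
 * of that round trip, assuming a lock known to be held:
 *
 *      struct lock_class *lc = LOCK_CLASS(&rw->lock_object);
 *      int how;
 *
 *      how = lc->lc_unlock(&rw->lock_object);  // 1 if write, 0 if read
 *      // ... work without the lock held ...
 *      lc->lc_lock(&rw->lock_object, how);     // reacquire in the same mode
 */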

#ifdef KDTRACE_HOOKS
int
owner_rw(struct lock_object *lock, struct thread **owner)
{
        struct rwlock *rw = (struct rwlock *)lock;
        uintptr_t x = rw->rw_lock;

        *owner = rw_wowner(rw);
        return ((x & RW_LOCK_READ) != 0 ?  (RW_READERS(x) != 0) :
            (*owner != NULL));
}
#endif

void
rw_init_flags(struct rwlock *rw, const char *name, int opts)
{
        int flags;

        MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
            RW_RECURSE)) == 0);
        ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
            ("%s: rw_lock not aligned for %s: %p", __func__, name,
            &rw->rw_lock));

        flags = LO_UPGRADABLE;
        if (opts & RW_DUPOK)
                flags |= LO_DUPOK;
        if (opts & RW_NOPROFILE)
                flags |= LO_NOPROFILE;
        if (!(opts & RW_NOWITNESS))
                flags |= LO_WITNESS;
        if (opts & RW_RECURSE)
                flags |= LO_RECURSABLE;
        if (opts & RW_QUIET)
                flags |= LO_QUIET;

        rw->rw_lock = RW_UNLOCKED;
        rw->rw_recurse = 0;
        lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
}
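
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * caller initializes the lock once, uses it, and destroys it on teardown.
 * The names below ("foo_lock", "foo softc lock") are hypothetical:
 *
 *      struct rwlock foo_lock;
 *
 *      rw_init_flags(&foo_lock, "foo softc lock", RW_RECURSE);
 *      ...
 *      rw_destroy(&foo_lock);
 *
 * rw_init() is the common case and is equivalent to passing opts == 0.
 */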

void
rw_destroy(struct rwlock *rw)
{

        KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
        KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
        rw->rw_lock = RW_DESTROYED;
        lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
        struct rw_args *args = arg;

        rw_init(args->ra_rw, args->ra_desc);
}

void
rw_sysinit_flags(void *arg)
{
        struct rw_args_flags *args = arg;

        rw_init_flags(args->ra_rw, args->ra_desc, args->ra_flags);
}
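
/*
 * Illustrative note (not part of the original file): rw_sysinit() and
 * rw_sysinit_flags() are SYSINIT callbacks.  They are normally reached
 * through the RW_SYSINIT()/RW_SYSINIT_FLAGS() macros in <sys/rwlock.h>,
 * which arrange for a global lock to be initialized at boot, e.g.:
 *
 *      static struct rwlock foo_global_lock;
 *      RW_SYSINIT(foo_lock_init, &foo_global_lock, "foo global lock");
 *
 * The "foo" names here are hypothetical.
 */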

int
rw_wowned(struct rwlock *rw)
{

        return (rw_wowner(rw) == curthread);
}

void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;
        MPASS(curthread != NULL);
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
        WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
            line, NULL);
        __rw_wlock(rw, curthread, file, line);
        LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
        WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
        curthread->td_locks++;
}

int
_rw_try_wlock(struct rwlock *rw, const char *file, int line)
{
        int rval;

        if (SCHEDULER_STOPPED())
                return (1);

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

        if (rw_wlocked(rw) &&
            (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
                rw->rw_recurse++;
                rval = 1;
        } else
                rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
                    (uintptr_t)curthread);

        LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
        if (rval) {
                WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                curthread->td_locks++;
        }
        return (rval);
}
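
/*
 * Illustrative usage sketch (not part of the original file): try-lock
 * operations are the usual way to take an rwlock where blocking is not
 * allowed, falling back to some other strategy on failure.  The function
 * and lock names are hypothetical:
 *
 *      if (rw_try_wlock(&foo_lock)) {
 *              foo_modify();
 *              rw_wunlock(&foo_lock);
 *      } else
 *              foo_defer_work();       // retry later rather than sleep
 */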

void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;
        MPASS(curthread != NULL);
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
        _rw_assert(rw, RA_WLOCKED, file, line);
        curthread->td_locks--;
        WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
            line);
        if (!rw_recursed(rw))
                LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_WUNLOCK_RELEASE, rw);
        __rw_wunlock(rw, curthread, file, line);
}

/*
 * Determine whether a new reader can acquire the lock.  Succeed if the
 * thread already holds a read lock and the lock is still read-locked, in
 * order to prevent deadlock from reader recursion.  Also succeed if the
 * lock is unlocked and has no writer waiters or spinners.  Failing in all
 * other cases gives writers priority over readers.
 */
#define RW_CAN_READ(_rw)                                                \
    ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &      \
    (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==  \
    RW_LOCK_READ)

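/*
 * Worked example (not part of the original file), assuming only the bit
 * layout used by RW_CAN_READ() above:
 *
 *   - v == RW_READERS_LOCK(3): read-locked with no writer waiters or
 *     spinners, so RW_CAN_READ(v) is true and a new reader may join.
 *   - v == RW_READERS_LOCK(3) | RW_LOCK_WRITE_WAITERS: a writer is
 *     queued, so a thread holding no read locks fails the test and
 *     queues behind the writer; a thread with td_rw_rlocks != 0 still
 *     succeeds, avoiding self-deadlock through reader recursion.
 *   - v encoding an owning thread pointer: RW_LOCK_READ is clear, so
 *     RW_CAN_READ(v) is false.
 */
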
void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
        struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
        volatile struct thread *owner;
        int spintries = 0;
        int i;
#endif
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        uintptr_t v;
#ifdef KDTRACE_HOOKS
        uint64_t spin_cnt = 0;
        uint64_t sleep_cnt = 0;
        int64_t sleep_time = 0;
#endif

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
        KASSERT(rw_wowner(rw) != curthread,
            ("%s (%s): wlock already held @ %s:%d", __func__,
            rw->lock_object.lo_name, file, line));
        WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);

        for (;;) {
#ifdef KDTRACE_HOOKS
                spin_cnt++;
#endif
                /*
                 * Handle the easy case.  If no other thread has a write
                 * lock, then try to bump up the count of read locks.  Note
                 * that we have to preserve the current state of the
                 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
                 * read lock, then rw_lock must have changed, so restart
                 * the loop.  Note that this handles the case of a
                 * completely unlocked rwlock since such a lock is encoded
                 * as a read lock with no waiters.
                 */
                v = rw->rw_lock;
                if (RW_CAN_READ(v)) {
                        /*
                         * The RW_LOCK_READ_WAITERS flag should only be set
                         * if the lock has been unlocked and write waiters
                         * were present.
                         */
                        if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
                            v + RW_ONE_READER)) {
                                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                        CTR4(KTR_LOCK,
                                            "%s: %p succeed %p -> %p", __func__,
                                            rw, (void *)v,
                                            (void *)(v + RW_ONE_READER));
                                break;
                        }
                        continue;
                }
#ifdef HWPMC_HOOKS
                PMC_SOFT_CALL( , , lock, failed);
#endif
                lock_profile_obtain_lock_failed(&rw->lock_object,
                    &contested, &waittime);

#ifdef ADAPTIVE_RWLOCKS
                /*
                 * If the owner is running on another CPU, spin until
                 * the owner stops running or the state of the lock
                 * changes.
                 */
                if ((v & RW_LOCK_READ) == 0) {
                        owner = (struct thread *)RW_OWNER(v);
                        if (TD_IS_RUNNING(owner)) {
                                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, rw, owner);
                                while ((struct thread*)RW_OWNER(rw->rw_lock) ==
                                    owner && TD_IS_RUNNING(owner)) {
                                        cpu_spinwait();
#ifdef KDTRACE_HOOKS
                                        spin_cnt++;
#endif
                                }
                                continue;
                        }
                } else if (spintries < ROWNER_RETRIES) {
                        spintries++;
                        for (i = 0; i < ROWNER_LOOPS; i++) {
                                v = rw->rw_lock;
                                if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
                                        break;
                                cpu_spinwait();
                        }
                        if (i != ROWNER_LOOPS)
                                continue;
                }
#endif

                /*
                 * Okay, now it's the hard case.  Some other thread already
                 * has a write lock or there are write waiters present,
                 * acquire the turnstile lock so we can begin the process
                 * of blocking.
                 */
                ts = turnstile_trywait(&rw->lock_object);

                /*
                 * The lock might have been released while we spun, so
                 * recheck its state and restart the loop if needed.
                 */
                v = rw->rw_lock;
                if (RW_CAN_READ(v)) {
                        turnstile_cancel(ts);
                        continue;
                }

#ifdef ADAPTIVE_RWLOCKS
                /*
                 * The current lock owner might have started executing
                 * on another CPU (or the lock could have changed
                 * owners) while we were waiting on the turnstile
                 * chain lock.  If so, drop the turnstile lock and try
                 * again.
                 */
                if ((v & RW_LOCK_READ) == 0) {
                        owner = (struct thread *)RW_OWNER(v);
                        if (TD_IS_RUNNING(owner)) {
                                turnstile_cancel(ts);
                                continue;
                        }
                }
#endif

                /*
                 * The lock is held in write mode or it already has waiters.
                 */
                MPASS(!RW_CAN_READ(v));

                /*
                 * If the RW_LOCK_READ_WAITERS flag is already set, then
                 * we can go ahead and block.  If it is not set then try
                 * to set it.  If we fail to set it drop the turnstile
                 * lock and restart the loop.
                 */
                if (!(v & RW_LOCK_READ_WAITERS)) {
                        if (!atomic_cmpset_ptr(&rw->rw_lock, v,
                            v | RW_LOCK_READ_WAITERS)) {
                                turnstile_cancel(ts);
                                continue;
                        }
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set read waiters flag",
                                    __func__, rw);
                }

                /*
                 * We were unable to acquire the lock and the read waiters
                 * flag is set, so we must block on the turnstile.
                 */
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
                            rw);
#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs();
#endif
                turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs();
                sleep_cnt++;
#endif
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
                            __func__, rw);
        }

        /*
         * TODO: acquire "owner of record" here.  Here be turnstile dragons
         * however.  turnstiles don't like owners changing between calls to
         * turnstile_wait() currently.
         */
        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_RLOCK_ACQUIRE, rw, contested,
            waittime, file, line);
        LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
        WITNESS_LOCK(&rw->lock_object, 0, file, line);
        curthread->td_locks++;
        curthread->td_rw_rlocks++;
#ifdef KDTRACE_HOOKS
        if (sleep_time)
                LOCKSTAT_RECORD1(LS_RW_RLOCK_BLOCK, rw, sleep_time);

        /*
         * Record only the loops spinning and not sleeping.
         */
        if (spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD1(LS_RW_RLOCK_SPIN, rw, (spin_cnt - sleep_cnt));
#endif
}

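/*
 * Illustrative usage sketch (not part of the original file): the common
 * read-side pattern pairs rw_rlock() with rw_runlock() around lookups.
 * The function and lock names are hypothetical:
 *
 *      rw_rlock(&foo_lock);
 *      obj = foo_lookup(key);
 *      rw_runlock(&foo_lock);
 *
 * Multiple readers may hold the lock concurrently; a writer calling
 * rw_wlock() blocks until all readers have called rw_runlock().
 */
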
int
_rw_try_rlock(struct rwlock *rw, const char *file, int line)
{
        uintptr_t x;

        if (SCHEDULER_STOPPED())
                return (1);

        for (;;) {
                x = rw->rw_lock;
                KASSERT(rw->rw_lock != RW_DESTROYED,
                    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
                if (!(x & RW_LOCK_READ))
                        break;
                if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
                        LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
                            line);
                        WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
                        curthread->td_locks++;
                        curthread->td_rw_rlocks++;
                        return (1);
                }
        }

        LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
        return (0);
}

void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
        struct turnstile *ts;
        uintptr_t x, v, queue;

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
        _rw_assert(rw, RA_RLOCKED, file, line);
        curthread->td_locks--;
        curthread->td_rw_rlocks--;
        WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
        LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

        /* TODO: drop "owner of record" here. */

        for (;;) {
                /*
                 * See if there is more than one read lock held.  If so,
                 * just drop one and return.
                 */
                x = rw->rw_lock;
                if (RW_READERS(x) > 1) {
                        if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
                            x - RW_ONE_READER)) {
                                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                        CTR4(KTR_LOCK,
                                            "%s: %p succeeded %p -> %p",
                                            __func__, rw, (void *)x,
                                            (void *)(x - RW_ONE_READER));
                                break;
                        }
                        continue;
                }
                /*
                 * If there aren't any waiters for a write lock, then try
                 * to drop it quickly.
                 */
                if (!(x & RW_LOCK_WAITERS)) {
                        MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
                            RW_READERS_LOCK(1));
                        if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
                            RW_UNLOCKED)) {
                                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                        CTR2(KTR_LOCK, "%s: %p last succeeded",
                                            __func__, rw);
                                break;
                        }
                        continue;
                }
                /*
                 * Ok, we know we have waiters and we think we are the
                 * last reader, so grab the turnstile lock.
                 */
                turnstile_chain_lock(&rw->lock_object);
                v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
                MPASS(v & RW_LOCK_WAITERS);

                /*
                 * Try to drop our lock, leaving the lock in an unlocked
                 * state.
                 *
                 * If you wanted to do explicit lock handoff you'd have to
                 * do it here.  You'd also want to use turnstile_signal()
                 * and you'd have to handle the race where a higher
                 * priority thread blocks on the write lock before the
                 * thread you wake up actually runs and have the new thread
                 * "steal" the lock.  For now it's a lot simpler to just
                 * wake up all of the waiters.
                 *
                 * As above, if we fail, then another thread might have
                 * acquired a read lock, so drop the turnstile lock and
                 * restart.
                 */
                x = RW_UNLOCKED;
                if (v & RW_LOCK_WRITE_WAITERS) {
                        queue = TS_EXCLUSIVE_QUEUE;
                        x |= (v & RW_LOCK_READ_WAITERS);
                } else
                        queue = TS_SHARED_QUEUE;
                if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
                    x)) {
                        turnstile_chain_unlock(&rw->lock_object);
                        continue;
                }
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
                            __func__, rw);

                /*
                 * Ok.  The lock is released and all that's left is to
                 * wake up the waiters.  Note that the lock might not be
                 * free anymore, but in that case the writers will just
                 * block again if they run before the new lock holder(s)
                 * release the lock.
                 */
                ts = turnstile_lookup(&rw->lock_object);
                MPASS(ts != NULL);
                turnstile_broadcast(ts, queue);
                turnstile_unpend(ts, TS_SHARED_LOCK);
                turnstile_chain_unlock(&rw->lock_object);
                break;
        }
        LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_RUNLOCK_RELEASE, rw);
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
        struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
        volatile struct thread *owner;
        int spintries = 0;
        int i;
#endif
        uintptr_t v, x;
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
#ifdef KDTRACE_HOOKS
        uint64_t spin_cnt = 0;
        uint64_t sleep_cnt = 0;
        int64_t sleep_time = 0;
#endif

        if (SCHEDULER_STOPPED())
                return;

        if (rw_wlocked(rw)) {
                KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
                    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
                    __func__, rw->lock_object.lo_name, file, line));
                rw->rw_recurse++;
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
                return;
        }

        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
                    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

        while (!_rw_write_lock(rw, tid)) {
#ifdef KDTRACE_HOOKS
                spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
                PMC_SOFT_CALL( , , lock, failed);
#endif
                lock_profile_obtain_lock_failed(&rw->lock_object,
                    &contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
                /*
                 * If the lock is write locked and the owner is
                 * running on another CPU, spin until the owner stops
                 * running or the state of the lock changes.
                 */
                v = rw->rw_lock;
                owner = (struct thread *)RW_OWNER(v);
                if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
                                    __func__, rw, owner);
                        while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
                            TD_IS_RUNNING(owner)) {
                                cpu_spinwait();
#ifdef KDTRACE_HOOKS
                                spin_cnt++;
#endif
                        }
                        continue;
                }
                if ((v & RW_LOCK_READ) && RW_READERS(v) &&
                    spintries < ROWNER_RETRIES) {
                        if (!(v & RW_LOCK_WRITE_SPINNER)) {
                                if (!atomic_cmpset_ptr(&rw->rw_lock, v,
                                    v | RW_LOCK_WRITE_SPINNER)) {
                                        continue;
                                }
                        }
                        spintries++;
                        for (i = 0; i < ROWNER_LOOPS; i++) {
                                if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
                                        break;
                                cpu_spinwait();
                        }
#ifdef KDTRACE_HOOKS
                        spin_cnt += ROWNER_LOOPS - i;
#endif
                        if (i != ROWNER_LOOPS)
                                continue;
                }
#endif
                ts = turnstile_trywait(&rw->lock_object);
                v = rw->rw_lock;

#ifdef ADAPTIVE_RWLOCKS
                /*
                 * The current lock owner might have started executing
                 * on another CPU (or the lock could have changed
                 * owners) while we were waiting on the turnstile
                 * chain lock.  If so, drop the turnstile lock and try
                 * again.
                 */
                if (!(v & RW_LOCK_READ)) {
                        owner = (struct thread *)RW_OWNER(v);
                        if (TD_IS_RUNNING(owner)) {
                                turnstile_cancel(ts);
                                continue;
                        }
                }
#endif
                /*
                 * Check the waiters flags on this rwlock.  If the lock was
                 * released without leaving any pending waiters queue, simply
                 * try to acquire it.  If a pending waiters queue is present,
                 * claim lock ownership and preserve the pending queue.
                 */
                x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
                if ((v & ~x) == RW_UNLOCKED) {
                        x &= ~RW_LOCK_WRITE_SPINNER;
                        if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
                                if (x)
                                        turnstile_claim(ts);
                                else
                                        turnstile_cancel(ts);
                                break;
                        }
                        turnstile_cancel(ts);
                        continue;
                }
                /*
                 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
                 * set it.  If we fail to set it, then loop back and try
                 * again.
                 */
                if (!(v & RW_LOCK_WRITE_WAITERS)) {
                        if (!atomic_cmpset_ptr(&rw->rw_lock, v,
                            v | RW_LOCK_WRITE_WAITERS)) {
                                turnstile_cancel(ts);
                                continue;
                        }
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set write waiters flag",
                                    __func__, rw);
                }
                /*
                 * We were unable to acquire the lock and the write waiters
                 * flag is set, so we must block on the turnstile.
                 */
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
                            rw);
#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs();
#endif
                turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs();
                sleep_cnt++;
#endif
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
                            __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
                spintries = 0;
#endif
        }
        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE, rw, contested,
            waittime, file, line);
#ifdef KDTRACE_HOOKS
        if (sleep_time)
                LOCKSTAT_RECORD1(LS_RW_WLOCK_BLOCK, rw, sleep_time);

        /*
         * Record only the loops spinning and not sleeping.
         */
        if (spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD1(LS_RW_WLOCK_SPIN, rw, (spin_cnt - sleep_cnt));
#endif
}

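/*
 * Illustrative note (not part of the original file): _rw_wlock_hard() is
 * only the slow path.  The fast path lives in the __rw_wlock() macro in
 * <sys/rwlock.h>, which is roughly equivalent to the following hedged
 * sketch:
 *
 *      if (!atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED, tid))
 *              _rw_wlock_hard(rw, tid, file, line);
 *
 * so an uncontested write lock costs a single compare-and-set, and the
 * code above runs only when that fails.
 */
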
/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the two waiter bits must be set, indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
        struct turnstile *ts;
        uintptr_t v;
        int queue;

        if (SCHEDULER_STOPPED())
                return;

        if (rw_wlocked(rw) && rw_recursed(rw)) {
                rw->rw_recurse--;
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
                return;
        }

        KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
            ("%s: neither of the waiter flags are set", __func__));

        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

        turnstile_chain_lock(&rw->lock_object);
        ts = turnstile_lookup(&rw->lock_object);
        MPASS(ts != NULL);

        /*
         * Use the same algorithm as sx locks for now: prefer waking up
         * writers, if there are any, over readers.  This is probably not
         * ideal.
         *
         * 'v' is the value we are going to write back to rw_lock.  If we
         * have waiters on both queues, we need to preserve the state of
         * the waiter flag for the queue we don't wake up.  For now this is
         * hardcoded for the algorithm mentioned above.
         *
         * In the case of both readers and writers waiting, we wake up the
         * writers but leave the RW_LOCK_READ_WAITERS flag set so that the
         * queued readers are woken when the lock is next released.  There
         * is probably a potential priority inversion in there that could
         * be worked around either by waking both queues of waiters or
         * doing some complicated lock handoff gymnastics.
         */
        v = RW_UNLOCKED;
        if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
                queue = TS_EXCLUSIVE_QUEUE;
                v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
        } else
                queue = TS_SHARED_QUEUE;

        /* Wake up all waiters for the specific queue. */
        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
                    queue == TS_SHARED_QUEUE ? "read" : "write");
        turnstile_broadcast(ts, queue);
        atomic_store_rel_ptr(&rw->rw_lock, v);
        turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
        turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
        uintptr_t v, x, tid;
        struct turnstile *ts;
        int success;

        if (SCHEDULER_STOPPED())
                return (1);

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
        _rw_assert(rw, RA_RLOCKED, file, line);

        /*
         * Attempt to switch from one reader to a writer.  If there
         * are any write waiters, then we will have to lock the
         * turnstile first to prevent races with another writer
         * calling turnstile_wait() before we have claimed this
         * turnstile.  So, do the simple case of no waiters first.
         */
        tid = (uintptr_t)curthread;
        success = 0;
        for (;;) {
                v = rw->rw_lock;
                if (RW_READERS(v) > 1)
                        break;
                if (!(v & RW_LOCK_WAITERS)) {
                        success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
                        if (!success)
                                continue;
                        break;
                }

                /*
                 * Ok, we think we have waiters, so lock the turnstile.
                 */
                ts = turnstile_trywait(&rw->lock_object);
                v = rw->rw_lock;
                if (RW_READERS(v) > 1) {
                        turnstile_cancel(ts);
                        break;
                }
                /*
                 * Try to switch from one reader to a writer again.  This time
                 * we honor the current state of the waiters flags.
                 * If we obtain the lock with the flags set, then claim
                 * ownership of the turnstile.
                 */
                x = rw->rw_lock & RW_LOCK_WAITERS;
                success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
                if (success) {
                        if (x)
                                turnstile_claim(ts);
                        else
                                turnstile_cancel(ts);
                        break;
                }
                turnstile_cancel(ts);
        }
        LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
        if (success) {
                curthread->td_rw_rlocks--;
                WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                LOCKSTAT_RECORD0(LS_RW_TRYUPGRADE_UPGRADE, rw);
        }
        return (success);
}

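/*
 * Illustrative usage sketch (not part of the original file): since the
 * upgrade can fail, callers typically fall back to dropping the read lock
 * and taking the write lock, revalidating whatever the read lock protected.
 * The function and lock names are hypothetical:
 *
 *      rw_rlock(&foo_lock);
 *      obj = foo_lookup(key);
 *      if (!rw_try_upgrade(&foo_lock)) {
 *              rw_runlock(&foo_lock);
 *              rw_wlock(&foo_lock);
 *              obj = foo_lookup(key);  // state may have changed; recheck
 *      }
 *      // ... modify under the write lock ...
 *      rw_wunlock(&foo_lock);
 */
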
/*
 * Downgrade a write lock into a single read lock.
 */
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
        struct turnstile *ts;
        uintptr_t tid, v;
        int rwait, wwait;

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
        _rw_assert(rw, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
        if (rw_recursed(rw))
                panic("downgrade of a recursed lock");
#endif

        WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

        /*
         * Convert from a writer to a single reader.  First we handle
         * the easy case with no waiters.  If there are any waiters, we
         * lock the turnstile and "disown" the lock.
         */
        tid = (uintptr_t)curthread;
        if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
                goto out;

        /*
         * Ok, we think we have waiters, so lock the turnstile so we can
         * read the waiter flags without any races.
         */
        turnstile_chain_lock(&rw->lock_object);
        v = rw->rw_lock & RW_LOCK_WAITERS;
        rwait = v & RW_LOCK_READ_WAITERS;
        wwait = v & RW_LOCK_WRITE_WAITERS;
        MPASS(rwait | wwait);

        /*
         * Downgrade from a write lock while preserving waiters flag
         * and give up ownership of the turnstile.
         */
        ts = turnstile_lookup(&rw->lock_object);
        MPASS(ts != NULL);
        if (!wwait)
                v &= ~RW_LOCK_READ_WAITERS;
        atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
        /*
         * Wake other readers if there are no writers pending.  Otherwise they
         * won't be able to acquire the lock anyway.
         */
        if (rwait && !wwait) {
                turnstile_broadcast(ts, TS_SHARED_QUEUE);
                turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
        } else
                turnstile_disown(ts);
        turnstile_chain_unlock(&rw->lock_object);
out:
        curthread->td_rw_rlocks++;
        LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
        LOCKSTAT_RECORD0(LS_RW_DOWNGRADE_DOWNGRADE, rw);
}

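/*
 * Illustrative usage sketch (not part of the original file): unlike
 * rw_try_upgrade(), a downgrade always succeeds, so it is handy when a
 * writer wants to keep reading the structure it just modified without
 * blocking other readers.  The names are hypothetical:
 *
 *      rw_wlock(&foo_lock);
 *      foo_insert(obj);
 *      rw_downgrade(&foo_lock);        // now just one reader among many
 *      foo_report(obj);                // read-only work
 *      rw_runlock(&foo_lock);
 */
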
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
_rw_assert(struct rwlock *rw, int what, const char *file, int line)
{

        if (panicstr != NULL)
                return;
        switch (what) {
        case RA_LOCKED:
        case RA_LOCKED | RA_RECURSED:
        case RA_LOCKED | RA_NOTRECURSED:
        case RA_RLOCKED:
#ifdef WITNESS
                witness_assert(&rw->lock_object, what, file, line);
#else
                /*
                 * If some other thread has a write lock or we have one
                 * and are asserting a read lock, fail.  Also, if no one
                 * has a lock at all, fail.
                 */
                if (rw->rw_lock == RW_UNLOCKED ||
                    (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
                    rw_wowner(rw) != curthread)))
                        panic("Lock %s not %slocked @ %s:%d\n",
                            rw->lock_object.lo_name, (what == RA_RLOCKED) ?
                            "read " : "", file, line);

                if (!(rw->rw_lock & RW_LOCK_READ)) {
                        if (rw_recursed(rw)) {
                                if (what & RA_NOTRECURSED)
                                        panic("Lock %s recursed @ %s:%d\n",
                                            rw->lock_object.lo_name, file,
                                            line);
                        } else if (what & RA_RECURSED)
                                panic("Lock %s not recursed @ %s:%d\n",
                                    rw->lock_object.lo_name, file, line);
                }
#endif
                break;
        case RA_WLOCKED:
        case RA_WLOCKED | RA_RECURSED:
        case RA_WLOCKED | RA_NOTRECURSED:
                if (rw_wowner(rw) != curthread)
                        panic("Lock %s not exclusively locked @ %s:%d\n",
                            rw->lock_object.lo_name, file, line);
                if (rw_recursed(rw)) {
                        if (what & RA_NOTRECURSED)
                                panic("Lock %s recursed @ %s:%d\n",
                                    rw->lock_object.lo_name, file, line);
                } else if (what & RA_RECURSED)
                        panic("Lock %s not recursed @ %s:%d\n",
                            rw->lock_object.lo_name, file, line);
                break;
        case RA_UNLOCKED:
#ifdef WITNESS
                witness_assert(&rw->lock_object, what, file, line);
#else
                /*
                 * If we hold a write lock, fail.  We can't reliably check
                 * to see whether we hold a read lock or not.
                 */
                if (rw_wowner(rw) == curthread)
                        panic("Lock %s exclusively locked @ %s:%d\n",
                            rw->lock_object.lo_name, file, line);
#endif
                break;
        default:
                panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
                    line);
        }
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
void
db_show_rwlock(struct lock_object *lock)
{
        struct rwlock *rw;
        struct thread *td;

        rw = (struct rwlock *)lock;

        db_printf(" state: ");
        if (rw->rw_lock == RW_UNLOCKED)
                db_printf("UNLOCKED\n");
        else if (rw->rw_lock == RW_DESTROYED) {
                db_printf("DESTROYED\n");
                return;
        } else if (rw->rw_lock & RW_LOCK_READ)
                db_printf("RLOCK: %ju locks\n",
                    (uintmax_t)(RW_READERS(rw->rw_lock)));
        else {
                td = rw_wowner(rw);
                db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_name);
                if (rw_recursed(rw))
                        db_printf(" recursed: %u\n", rw->rw_recurse);
        }
        db_printf(" waiters: ");
        switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
        case RW_LOCK_READ_WAITERS:
                db_printf("readers\n");
                break;
        case RW_LOCK_WRITE_WAITERS:
                db_printf("writers\n");
                break;
        case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
                db_printf("readers and writers\n");
                break;
        default:
                db_printf("none\n");
                break;
        }
}

#endif
