FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_turnstile.c

    1 /*-
    2  * SPDX-License-Identifier: BSD-3-Clause
    3  *
    4  * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  * 3. Berkeley Software Design Inc's name may not be used to endorse or
   15  *    promote products derived from this software without specific prior
   16  *    written permission.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
   19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
   22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28  * SUCH DAMAGE.
   29  *
   30  *      from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
   31  *      and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
   32  */
   33 
   34 /*
   35  * Implementation of turnstiles used to hold queues of threads blocked on
   36  * non-sleepable locks.  Sleepable locks use condition variables to
   37  * implement their queues.  Turnstiles differ from sleep queues in that
   38  * turnstile queues are assigned to a lock held by an owning thread.  Thus,
   39  * when one thread is enqueued onto a turnstile, it can lend its priority
   40  * to the owning thread.
   41  *
   42  * We wish to avoid bloating locks with an embedded turnstile and we do not
   43  * want to use back-pointers in the locks for the same reason.  Thus, we
   44  * use a similar approach to that of Solaris 7 as described in Solaris
   45  * Internals by Jim Mauro and Richard McDougall.  Turnstiles are looked up
   46  * in a hash table based on the address of the lock.  Each entry in the
   47  * hash table is a linked list of turnstiles and is called a turnstile
   48  * chain.  Each chain contains a spin mutex that protects all of the
   49  * turnstiles in the chain.
   50  *
   51  * Each time a thread is created, a turnstile is allocated from a UMA zone
   52  * and attached to that thread.  When a thread blocks on a lock, if it is the
   53  * first thread to block, it lends its turnstile to the lock.  If the lock
   54  * already has a turnstile, then it gives its turnstile to the free list
   55  * of the lock's turnstile.  When a thread is woken up, it takes a turnstile from
   56  * the free list if there are any other waiters.  If it is the only thread
   57  * blocked on the lock, then it reclaims the turnstile associated with the lock
   58  * and removes it from the hash table.
   59  */
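
/*
 * Editorial illustration (not in the original file): suppose threads A
 * and B block on lock L in turn.  A arrives first and donates its
 * turnstile to L, placing it on the hash chain; B then queues itself on
 * that turnstile and parks its own turnstile on the free list hung off
 * of it.  If A is awakened by turnstile_signal() while B still waits,
 * A leaves with a turnstile taken from the free list; when B is
 * awakened last, it reclaims L's turnstile and the chain entry goes
 * away.  Every thread thus holds exactly one spare turnstile whenever
 * it is runnable.
 */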
   60 
   61 #include <sys/cdefs.h>
   62 __FBSDID("$FreeBSD: releng/12.0/sys/kern/subr_turnstile.c 334546 2018-06-02 22:37:53Z mjg $");
   63 
   64 #include "opt_ddb.h"
   65 #include "opt_turnstile_profiling.h"
   66 #include "opt_sched.h"
   67 
   68 #include <sys/param.h>
   69 #include <sys/systm.h>
   70 #include <sys/kdb.h>
   71 #include <sys/kernel.h>
   72 #include <sys/ktr.h>
   73 #include <sys/lock.h>
   74 #include <sys/mutex.h>
   75 #include <sys/proc.h>
   76 #include <sys/queue.h>
   77 #include <sys/sched.h>
   78 #include <sys/sdt.h>
   79 #include <sys/sysctl.h>
   80 #include <sys/turnstile.h>
   81 
   82 #include <vm/uma.h>
   83 
   84 #ifdef DDB
   85 #include <ddb/ddb.h>
   86 #include <sys/lockmgr.h>
   87 #include <sys/sx.h>
   88 #endif
   89 
   90 /*
   91  * Constants for the hash table of turnstile chains.  TC_SHIFT is a magic
   92  * number chosen because the sleep queues use the same value for the
   93  * shift.  Basically, we ignore the lower 8 bits of the address.
   94  * TC_TABLESIZE must be a power of two for TC_MASK to work properly.
   95  */
   96 #define TC_TABLESIZE    128                     /* Must be power of 2. */
   97 #define TC_MASK         (TC_TABLESIZE - 1)
   98 #define TC_SHIFT        8
   99 #define TC_HASH(lock)   (((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK)
  100 #define TC_LOOKUP(lock) &turnstile_chains[TC_HASH(lock)]
  101 
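/*
 * Editorial sketch (not in the original file): how a lock address maps
 * to a chain.  For a hypothetical lock at address 0xffffffff81234a40:
 *
 *	TC_HASH(lock) == ((0xffffffff81234a40 >> 8) & 127) == 0x4a == 74
 *
 * so the lock hashes to turnstile_chains[74].  Locks whose addresses
 * agree in bits 8 through 14 share a chain and its spin mutex.
 */
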
  102 /*
  103  * There are three different lists of turnstiles as follows.  The list
  104  * connected by ts_link entries is a per-thread list of all the turnstiles
  105  * attached to locks that we own.  This is used to fix up our priority when
  106  * a lock is released.  The other two lists use the ts_hash entries.  The
  107  * first of these two is the turnstile chain list that a turnstile is on
  108  * when it is attached to a lock.  The second list to use ts_hash is the
  109  * free list hung off of a turnstile that is attached to a lock.
  110  *
  111  * Each turnstile contains three lists of threads.  The two ts_blocked lists
  112  * are linked lists of threads blocked on the turnstile's lock.  One list is
  113  * for exclusive waiters, and the other is for shared waiters.  The
  114  * ts_pending list is a linked list of threads previously awakened by
  115  * turnstile_signal() or turnstile_broadcast() that are waiting to be put on
  116  * the run queue.
  117  *
  118  * Locking key:
  119  *  c - turnstile chain lock
  120  *  q - td_contested lock
  121  */
  122 struct turnstile {
  123         struct mtx ts_lock;                     /* Spin lock for self. */
  124         struct threadqueue ts_blocked[2];       /* (c + q) Blocked threads. */
  125         struct threadqueue ts_pending;          /* (c) Pending threads. */
  126         LIST_ENTRY(turnstile) ts_hash;          /* (c) Chain and free list. */
  127         LIST_ENTRY(turnstile) ts_link;          /* (q) Contested locks. */
  128         LIST_HEAD(, turnstile) ts_free;         /* (c) Free turnstiles. */
  129         struct lock_object *ts_lockobj;         /* (c) Lock we reference. */
  130         struct thread *ts_owner;                /* (c + q) Who owns the lock. */
  131 };
  132 
  133 struct turnstile_chain {
  134         LIST_HEAD(, turnstile) tc_turnstiles;   /* List of turnstiles. */
  135         struct mtx tc_lock;                     /* Spin lock for this chain. */
  136 #ifdef TURNSTILE_PROFILING
  137         u_int   tc_depth;                       /* Length of tc_queues. */
  138         u_int   tc_max_depth;                   /* Max length of tc_queues. */
  139 #endif
  140 };
  141 
  142 #ifdef TURNSTILE_PROFILING
  143 u_int turnstile_max_depth;
  144 static SYSCTL_NODE(_debug, OID_AUTO, turnstile, CTLFLAG_RD, 0,
  145     "turnstile profiling");
  146 static SYSCTL_NODE(_debug_turnstile, OID_AUTO, chains, CTLFLAG_RD, 0,
  147     "turnstile chain stats");
  148 SYSCTL_UINT(_debug_turnstile, OID_AUTO, max_depth, CTLFLAG_RD,
  149     &turnstile_max_depth, 0, "maximum depth achieved of a single chain");
  150 #endif
  151 static struct mtx td_contested_lock;
  152 static struct turnstile_chain turnstile_chains[TC_TABLESIZE];
  153 static uma_zone_t turnstile_zone;
  154 
  155 /*
  156  * Prototypes for non-exported routines.
  157  */
  158 static void     init_turnstile0(void *dummy);
  159 #ifdef TURNSTILE_PROFILING
  160 static void     init_turnstile_profiling(void *arg);
  161 #endif
  162 static void     propagate_priority(struct thread *td);
  163 static int      turnstile_adjust_thread(struct turnstile *ts,
  164                     struct thread *td);
  165 static struct thread *turnstile_first_waiter(struct turnstile *ts);
  166 static void     turnstile_setowner(struct turnstile *ts, struct thread *owner);
  167 #ifdef INVARIANTS
  168 static void     turnstile_dtor(void *mem, int size, void *arg);
  169 #endif
  170 static int      turnstile_init(void *mem, int size, int flags);
  171 static void     turnstile_fini(void *mem, int size);
  172 
  173 SDT_PROVIDER_DECLARE(sched);
  174 SDT_PROBE_DEFINE(sched, , , sleep);
  175 SDT_PROBE_DEFINE2(sched, , , wakeup, "struct thread *", 
  176     "struct proc *");
  177 
  178 /*
  179  * Walks the chain of turnstiles and their owners to propagate the priority
  180  * of the thread being blocked to all the threads holding locks that have to
  181  * release their locks before this thread can run again.
  182  */
  183 static void
  184 propagate_priority(struct thread *td)
  185 {
  186         struct turnstile *ts;
  187         int pri;
  188 
  189         THREAD_LOCK_ASSERT(td, MA_OWNED);
  190         pri = td->td_priority;
  191         ts = td->td_blocked;
  192         THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
  193         /*
  194          * Grab a recursive lock on this turnstile so it stays locked
  195          * for the whole operation.  The caller expects us to return with
  196          * the original lock held.  We only ever lock down the chain so
  197          * the lock order is constant.
  198          */
  199         mtx_lock_spin(&ts->ts_lock);
  200         for (;;) {
  201                 td = ts->ts_owner;
  202 
  203                 if (td == NULL) {
  204                         /*
  205                          * This might be a read lock with no owner.  There's
  206                          * not much we can do, so just bail.
  207                          */
  208                         mtx_unlock_spin(&ts->ts_lock);
  209                         return;
  210                 }
  211 
  212                 thread_lock_flags(td, MTX_DUPOK);
  213                 mtx_unlock_spin(&ts->ts_lock);
  214                 MPASS(td->td_proc != NULL);
  215                 MPASS(td->td_proc->p_magic == P_MAGIC);
  216 
  217                 /*
  218                  * If the thread is asleep, then we are probably about
  219                  * to deadlock.  To make debugging this easier, show
  220                  * backtrace of misbehaving thread and panic to not
  221                  * leave the kernel deadlocked.
  222                  */
  223                 if (TD_IS_SLEEPING(td)) {
  224                         printf(
  225                 "Sleeping thread (tid %d, pid %d) owns a non-sleepable lock\n",
  226                             td->td_tid, td->td_proc->p_pid);
  227                         kdb_backtrace_thread(td);
  228                         panic("sleeping thread");
  229                 }
  230 
  231                 /*
  232                  * If this thread already has higher priority than the
  233                  * thread that is being blocked, we are finished.
  234                  */
  235                 if (td->td_priority <= pri) {
  236                         thread_unlock(td);
  237                         return;
  238                 }
  239 
  240                 /*
  241                  * Bump this thread's priority.
  242                  */
  243                 sched_lend_prio(td, pri);
  244 
  245                 /*
  246                  * If lock holder is actually running or on the run queue
  247                  * then we are done.
  248                  */
  249                 if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td)) {
  250                         MPASS(td->td_blocked == NULL);
  251                         thread_unlock(td);
  252                         return;
  253                 }
  254 
  255 #ifndef SMP
  256                 /*
  257                  * For UP, we check to see if td is curthread (this should
  258                  * never happen, however, as it would mean we are deadlocked).
  259                  */
  260                 KASSERT(td != curthread, ("Deadlock detected"));
  261 #endif
  262 
  263                 /*
  264                  * If we aren't blocked on a lock, we should be.
  265                  */
  266                 KASSERT(TD_ON_LOCK(td), (
  267                     "thread %d(%s):%d holds %s but isn't blocked on a lock\n",
  268                     td->td_tid, td->td_name, td->td_state,
  269                     ts->ts_lockobj->lo_name));
  270 
  271                 /*
  272                  * Pick up the lock that td is blocked on.
  273                  */
  274                 ts = td->td_blocked;
  275                 MPASS(ts != NULL);
  276                 THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
  277                 /* Resort td on the list if needed. */
  278                 if (!turnstile_adjust_thread(ts, td)) {
  279                         mtx_unlock_spin(&ts->ts_lock);
  280                         return;
  281                 }
  282                 /* The thread lock is released as the ts lock above. */
  283         }
  284 }
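
/*
 * Editorial illustration (not in the original file): lower td_priority
 * values are more important.  Suppose thread A (priority 80) owns M1
 * but is blocked on M2, which thread B (priority 100) owns while
 * running.  When thread C (priority 20) blocks on M1 and calls
 * propagate_priority(), the loop first lends priority 20 to A; since A
 * is itself blocked, the loop follows A->td_blocked to M2's turnstile
 * and lends 20 to B as well, then stops because B is running.
 */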
  285 
  286 /*
  287  * Adjust the thread's position on a turnstile after its priority has been
  288  * changed.
  289  */
  290 static int
  291 turnstile_adjust_thread(struct turnstile *ts, struct thread *td)
  292 {
  293         struct thread *td1, *td2;
  294         int queue;
  295 
  296         THREAD_LOCK_ASSERT(td, MA_OWNED);
  297         MPASS(TD_ON_LOCK(td));
  298 
  299         /*
  300          * This thread may not be blocked on this turnstile anymore
  301                  * but instead might already have been woken up on another CPU
  302          * that is waiting on the thread lock in turnstile_unpend() to
  303          * finish waking this thread up.  We can detect this case
  304          * by checking to see if this thread has been given a
  305          * turnstile by either turnstile_signal() or
  306          * turnstile_broadcast().  In this case, treat the thread as
  307          * if it was already running.
  308          */
  309         if (td->td_turnstile != NULL)
  310                 return (0);
  311 
  312         /*
  313          * Check if the thread needs to be moved on the blocked chain.
  314          * It needs to be moved if either its priority is lower than
  315          * the previous thread or higher than the next thread.
  316          */
  317         THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
  318         td1 = TAILQ_PREV(td, threadqueue, td_lockq);
  319         td2 = TAILQ_NEXT(td, td_lockq);
  320         if ((td1 != NULL && td->td_priority < td1->td_priority) ||
  321             (td2 != NULL && td->td_priority > td2->td_priority)) {
  322 
  323                 /*
  324                  * Remove thread from blocked chain and determine where
  325                  * it should be moved to.
  326                  */
  327                 queue = td->td_tsqueue;
  328                 MPASS(queue == TS_EXCLUSIVE_QUEUE || queue == TS_SHARED_QUEUE);
  329                 mtx_lock_spin(&td_contested_lock);
  330                 TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
  331                 TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq) {
  332                         MPASS(td1->td_proc->p_magic == P_MAGIC);
  333                         if (td1->td_priority > td->td_priority)
  334                                 break;
  335                 }
  336 
  337                 if (td1 == NULL)
  338                         TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
  339                 else
  340                         TAILQ_INSERT_BEFORE(td1, td, td_lockq);
  341                 mtx_unlock_spin(&td_contested_lock);
  342                 if (td1 == NULL)
  343                         CTR3(KTR_LOCK,
  344                     "turnstile_adjust_thread: td %d put at tail on [%p] %s",
  345                             td->td_tid, ts->ts_lockobj, ts->ts_lockobj->lo_name);
  346                 else
  347                         CTR4(KTR_LOCK,
  348                     "turnstile_adjust_thread: td %d moved before %d on [%p] %s",
  349                             td->td_tid, td1->td_tid, ts->ts_lockobj,
  350                             ts->ts_lockobj->lo_name);
  351         }
  352         return (1);
  353 }
  354 
  355 /*
  356  * Early initialization of turnstiles.  This is not done via a SYSINIT()
  357  * since this needs to be initialized very early when mutexes are first
  358  * initialized.
  359  */
  360 void
  361 init_turnstiles(void)
  362 {
  363         int i;
  364 
  365         for (i = 0; i < TC_TABLESIZE; i++) {
  366                 LIST_INIT(&turnstile_chains[i].tc_turnstiles);
  367                 mtx_init(&turnstile_chains[i].tc_lock, "turnstile chain",
  368                     NULL, MTX_SPIN);
  369         }
  370         mtx_init(&td_contested_lock, "td_contested", NULL, MTX_SPIN);
  371         LIST_INIT(&thread0.td_contested);
  372         thread0.td_turnstile = NULL;
  373 }
  374 
  375 #ifdef TURNSTILE_PROFILING
  376 static void
  377 init_turnstile_profiling(void *arg)
  378 {
  379         struct sysctl_oid *chain_oid;
  380         char chain_name[10];
  381         int i;
  382 
  383         for (i = 0; i < TC_TABLESIZE; i++) {
  384                 snprintf(chain_name, sizeof(chain_name), "%d", i);
  385                 chain_oid = SYSCTL_ADD_NODE(NULL, 
  386                     SYSCTL_STATIC_CHILDREN(_debug_turnstile_chains), OID_AUTO,
  387                     chain_name, CTLFLAG_RD, NULL, "turnstile chain stats");
  388                 SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
  389                     "depth", CTLFLAG_RD, &turnstile_chains[i].tc_depth, 0,
  390                     NULL);
  391                 SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
  392                     "max_depth", CTLFLAG_RD, &turnstile_chains[i].tc_max_depth,
  393                     0, NULL);
  394         }
  395 }
  396 SYSINIT(turnstile_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
  397     init_turnstile_profiling, NULL);
  398 #endif
  399 
  400 static void
  401 init_turnstile0(void *dummy)
  402 {
  403 
  404         turnstile_zone = uma_zcreate("TURNSTILE", sizeof(struct turnstile),
  405             NULL,
  406 #ifdef INVARIANTS
  407             turnstile_dtor,
  408 #else
  409             NULL,
  410 #endif
  411             turnstile_init, turnstile_fini, UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
  412         thread0.td_turnstile = turnstile_alloc();
  413 }
  414 SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);
  415 
  416 /*
  417  * Update a thread on the turnstile list after its priority has been changed.
  418  * The old priority is passed in as an argument.
  419  */
  420 void
  421 turnstile_adjust(struct thread *td, u_char oldpri)
  422 {
  423         struct turnstile *ts;
  424 
  425         MPASS(TD_ON_LOCK(td));
  426 
  427         /*
  428          * Pick up the lock that td is blocked on.
  429          */
  430         ts = td->td_blocked;
  431         MPASS(ts != NULL);
  432         THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
  433         mtx_assert(&ts->ts_lock, MA_OWNED);
  434 
  435         /* Resort the turnstile on the list. */
  436         if (!turnstile_adjust_thread(ts, td))
  437                 return;
  438         /*
  439          * If our priority was lowered and we are at the head of the
  440          * turnstile, then propagate our new priority up the chain.
  441          * Note that we currently don't try to revoke lent priorities
  442          * when our priority goes up.
  443          */
  444         MPASS(td->td_tsqueue == TS_EXCLUSIVE_QUEUE ||
  445             td->td_tsqueue == TS_SHARED_QUEUE);
  446         if (td == TAILQ_FIRST(&ts->ts_blocked[td->td_tsqueue]) &&
  447             td->td_priority < oldpri) {
  448                 propagate_priority(td);
  449         }
  450 }
  451 
  452 /*
  453  * Set the owner of the lock this turnstile is attached to.
  454  */
  455 static void
  456 turnstile_setowner(struct turnstile *ts, struct thread *owner)
  457 {
  458 
  459         mtx_assert(&td_contested_lock, MA_OWNED);
  460         MPASS(ts->ts_owner == NULL);
  461 
  462         /* A shared lock might not have an owner. */
  463         if (owner == NULL)
  464                 return;
  465 
  466         MPASS(owner->td_proc->p_magic == P_MAGIC);
  467         ts->ts_owner = owner;
  468         LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
  469 }
  470 
  471 #ifdef INVARIANTS
  472 /*
  473  * UMA zone item deallocator.
  474  */
  475 static void
  476 turnstile_dtor(void *mem, int size, void *arg)
  477 {
  478         struct turnstile *ts;
  479 
  480         ts = mem;
  481         MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]));
  482         MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));
  483         MPASS(TAILQ_EMPTY(&ts->ts_pending));
  484 }
  485 #endif
  486 
  487 /*
  488  * UMA zone item initializer.
  489  */
  490 static int
  491 turnstile_init(void *mem, int size, int flags)
  492 {
  493         struct turnstile *ts;
  494 
  495         bzero(mem, size);
  496         ts = mem;
  497         TAILQ_INIT(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
  498         TAILQ_INIT(&ts->ts_blocked[TS_SHARED_QUEUE]);
  499         TAILQ_INIT(&ts->ts_pending);
  500         LIST_INIT(&ts->ts_free);
  501         mtx_init(&ts->ts_lock, "turnstile lock", NULL, MTX_SPIN | MTX_RECURSE);
  502         return (0);
  503 }
  504 
  505 static void
  506 turnstile_fini(void *mem, int size)
  507 {
  508         struct turnstile *ts;
  509 
  510         ts = mem;
  511         mtx_destroy(&ts->ts_lock);
  512 }
  513 
  514 /*
  515  * Get a turnstile for a new thread.
  516  */
  517 struct turnstile *
  518 turnstile_alloc(void)
  519 {
  520 
  521         return (uma_zalloc(turnstile_zone, M_WAITOK));
  522 }
  523 
  524 /*
  525  * Free a turnstile when a thread is destroyed.
  526  */
  527 void
  528 turnstile_free(struct turnstile *ts)
  529 {
  530 
  531         uma_zfree(turnstile_zone, ts);
  532 }
  533 
  534 /*
  535  * Lock the turnstile chain associated with the specified lock.
  536  */
  537 void
  538 turnstile_chain_lock(struct lock_object *lock)
  539 {
  540         struct turnstile_chain *tc;
  541 
  542         tc = TC_LOOKUP(lock);
  543         mtx_lock_spin(&tc->tc_lock);
  544 }
  545 
  546 struct turnstile *
  547 turnstile_trywait(struct lock_object *lock)
  548 {
  549         struct turnstile_chain *tc;
  550         struct turnstile *ts;
  551 
  552         tc = TC_LOOKUP(lock);
  553         mtx_lock_spin(&tc->tc_lock);
  554         LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
  555                 if (ts->ts_lockobj == lock) {
  556                         mtx_lock_spin(&ts->ts_lock);
  557                         return (ts);
  558                 }
  559 
  560         ts = curthread->td_turnstile;
  561         MPASS(ts != NULL);
  562         mtx_lock_spin(&ts->ts_lock);
  563         KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
  564         ts->ts_lockobj = lock;
  565 
  566         return (ts);
  567 }
  568 
  569 struct thread *
  570 turnstile_lock(struct turnstile *ts, struct lock_object **lockp)
  571 {
  572         struct turnstile_chain *tc;
  573         struct lock_object *lock;
  574 
  575         if ((lock = ts->ts_lockobj) == NULL)
  576                 return (NULL);
  577         tc = TC_LOOKUP(lock);
  578         mtx_lock_spin(&tc->tc_lock);
  579         mtx_lock_spin(&ts->ts_lock);
  580         if (__predict_false(lock != ts->ts_lockobj)) {
  581                 mtx_unlock_spin(&tc->tc_lock);
  582                 mtx_unlock_spin(&ts->ts_lock);
  583                 return (NULL);
  584         }
  585         *lockp = lock;
  586         return (ts->ts_owner);
  587 }
  588 
  589 void
  590 turnstile_unlock(struct turnstile *ts, struct lock_object *lock)
  591 {
  592         struct turnstile_chain *tc;
  593 
  594         mtx_assert(&ts->ts_lock, MA_OWNED);
  595         mtx_unlock_spin(&ts->ts_lock);
  596         if (ts == curthread->td_turnstile)
  597                 ts->ts_lockobj = NULL;
  598         tc = TC_LOOKUP(lock);
  599         mtx_unlock_spin(&tc->tc_lock);
  600 }
  601 
  602 void
  603 turnstile_assert(struct turnstile *ts)
  604 {
  605         MPASS(ts->ts_lockobj == NULL);
  606 }
  607 
  608 void
  609 turnstile_cancel(struct turnstile *ts)
  610 {
  611         struct turnstile_chain *tc;
  612         struct lock_object *lock;
  613 
  614         mtx_assert(&ts->ts_lock, MA_OWNED);
  615 
  616         mtx_unlock_spin(&ts->ts_lock);
  617         lock = ts->ts_lockobj;
  618         if (ts == curthread->td_turnstile)
  619                 ts->ts_lockobj = NULL;
  620         tc = TC_LOOKUP(lock);
  621         mtx_unlock_spin(&tc->tc_lock);
  622 }
  623 
  624 /*
  625  * Look up the turnstile for a lock in the hash table.  The caller must
  626  * hold the lock on the associated turnstile chain.  If no turnstile is
  627  * found in the hash table, NULL is returned.
  628  */
  629 struct turnstile *
  630 turnstile_lookup(struct lock_object *lock)
  631 {
  632         struct turnstile_chain *tc;
  633         struct turnstile *ts;
  634 
  635         tc = TC_LOOKUP(lock);
  636         mtx_assert(&tc->tc_lock, MA_OWNED);
  637         LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
  638                 if (ts->ts_lockobj == lock) {
  639                         mtx_lock_spin(&ts->ts_lock);
  640                         return (ts);
  641                 }
  642         return (NULL);
  643 }
  644 
  645 /*
  646  * Unlock the turnstile chain associated with a given lock.
  647  */
  648 void
  649 turnstile_chain_unlock(struct lock_object *lock)
  650 {
  651         struct turnstile_chain *tc;
  652 
  653         tc = TC_LOOKUP(lock);
  654         mtx_unlock_spin(&tc->tc_lock);
  655 }
  656 
  657 /*
  658  * Return a pointer to the highest-priority thread waiting on this
  659  * turnstile, or NULL if the turnstile has no waiters.
  660  */
  661 static struct thread *
  662 turnstile_first_waiter(struct turnstile *ts)
  663 {
  664         struct thread *std, *xtd;
  665 
  666         std = TAILQ_FIRST(&ts->ts_blocked[TS_SHARED_QUEUE]);
  667         xtd = TAILQ_FIRST(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
  668         if (xtd == NULL || (std != NULL && std->td_priority < xtd->td_priority))
  669                 return (std);
  670         return (xtd);
  671 }
  672 
  673 /*
  674  * Take ownership of a turnstile and adjust the priority of the new
  675  * owner appropriately.
  676  */
  677 void
  678 turnstile_claim(struct turnstile *ts)
  679 {
  680         struct thread *td, *owner;
  681         struct turnstile_chain *tc;
  682 
  683         mtx_assert(&ts->ts_lock, MA_OWNED);
  684         MPASS(ts != curthread->td_turnstile);
  685 
  686         owner = curthread;
  687         mtx_lock_spin(&td_contested_lock);
  688         turnstile_setowner(ts, owner);
  689         mtx_unlock_spin(&td_contested_lock);
  690 
  691         td = turnstile_first_waiter(ts);
  692         MPASS(td != NULL);
  693         MPASS(td->td_proc->p_magic == P_MAGIC);
  694         THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
  695 
  696         /*
  697          * Update the priority of the new owner if needed.
  698          */
  699         thread_lock(owner);
  700         if (td->td_priority < owner->td_priority)
  701                 sched_lend_prio(owner, td->td_priority);
  702         thread_unlock(owner);
  703         tc = TC_LOOKUP(ts->ts_lockobj);
  704         mtx_unlock_spin(&ts->ts_lock);
  705         mtx_unlock_spin(&tc->tc_lock);
  706 }
  707 
  708 /*
  709  * Block the current thread on the turnstile associated with 'lock'.  This
  710  * function will context switch and not return until this thread has been
  711  * woken back up.  This function must be called with the appropriate
  712  * turnstile chain locked and will return with it unlocked.
  713  */
  714 void
  715 turnstile_wait(struct turnstile *ts, struct thread *owner, int queue)
  716 {
  717         struct turnstile_chain *tc;
  718         struct thread *td, *td1;
  719         struct lock_object *lock;
  720 
  721         td = curthread;
  722         mtx_assert(&ts->ts_lock, MA_OWNED);
  723         if (owner)
  724                 MPASS(owner->td_proc->p_magic == P_MAGIC);
  725         MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
  726 
  727         /*
  728          * If the lock does not already have a turnstile, use this thread's
  729          * turnstile.  Otherwise insert the current thread into the
  730          * turnstile already in use by this lock.
  731          */
  732         tc = TC_LOOKUP(ts->ts_lockobj);
  733         mtx_assert(&tc->tc_lock, MA_OWNED);
  734         if (ts == td->td_turnstile) {
  735 #ifdef TURNSTILE_PROFILING
  736                 tc->tc_depth++;
  737                 if (tc->tc_depth > tc->tc_max_depth) {
  738                         tc->tc_max_depth = tc->tc_depth;
  739                         if (tc->tc_max_depth > turnstile_max_depth)
  740                                 turnstile_max_depth = tc->tc_max_depth;
  741                 }
  742 #endif
  743                 LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash);
  744                 KASSERT(TAILQ_EMPTY(&ts->ts_pending),
  745                     ("thread's turnstile has pending threads"));
  746                 KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]),
  747                     ("thread's turnstile has exclusive waiters"));
  748                 KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]),
  749                     ("thread's turnstile has shared waiters"));
  750                 KASSERT(LIST_EMPTY(&ts->ts_free),
  751                     ("thread's turnstile has a non-empty free list"));
  752                 MPASS(ts->ts_lockobj != NULL);
  753                 mtx_lock_spin(&td_contested_lock);
  754                 TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
  755                 turnstile_setowner(ts, owner);
  756                 mtx_unlock_spin(&td_contested_lock);
  757         } else {
  758                 TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq)
  759                         if (td1->td_priority > td->td_priority)
  760                                 break;
  761                 mtx_lock_spin(&td_contested_lock);
  762                 if (td1 != NULL)
  763                         TAILQ_INSERT_BEFORE(td1, td, td_lockq);
  764                 else
  765                         TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
  766                 MPASS(owner == ts->ts_owner);
  767                 mtx_unlock_spin(&td_contested_lock);
  768                 MPASS(td->td_turnstile != NULL);
  769                 LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
  770         }
  771         thread_lock(td);
  772         thread_lock_set(td, &ts->ts_lock);
  773         td->td_turnstile = NULL;
  774 
  775         /* Save who we are blocked on and switch. */
  776         lock = ts->ts_lockobj;
  777         td->td_tsqueue = queue;
  778         td->td_blocked = ts;
  779         td->td_lockname = lock->lo_name;
  780         td->td_blktick = ticks;
  781         TD_SET_LOCK(td);
  782         mtx_unlock_spin(&tc->tc_lock);
  783         propagate_priority(td);
  784 
  785         if (LOCK_LOG_TEST(lock, 0))
  786                 CTR4(KTR_LOCK, "%s: td %d blocked on [%p] %s", __func__,
  787                     td->td_tid, lock, lock->lo_name);
  788 
  789         SDT_PROBE0(sched, , , sleep);
  790 
  791         THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
  792         mi_switch(SW_VOL | SWT_TURNSTILE, NULL);
  793 
  794         if (LOCK_LOG_TEST(lock, 0))
  795                 CTR4(KTR_LOCK, "%s: td %d free from blocked on [%p] %s",
  796                     __func__, td->td_tid, lock, lock->lo_name);
  797         thread_unlock(td);
  798 }
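
/*
 * Editorial sketch (not in the original file): the overall shape of a
 * blocking acquire built on this interface, loosely modeled on the
 * mutex code.  The "my_*" type and helpers are hypothetical stand-ins.
 */
#if 0
struct my_lock {
	struct lock_object lock_object;		/* Hashed by TC_LOOKUP(). */
	volatile uintptr_t owner_word;		/* Owner thread + flag bits. */
};

static void
my_lock_sleep(struct my_lock *m)
{
	struct turnstile *ts;
	struct thread *owner;

	for (;;) {
		if (my_try_acquire(m))		/* Hypothetical fast path. */
			return;
		/* Lock the chain and reserve a turnstile for this lock. */
		ts = turnstile_trywait(&m->lock_object);
		/* Recheck: the lock may have been released meanwhile. */
		if (my_try_acquire(m)) {
			turnstile_cancel(ts);
			return;
		}
		owner = my_owner(m);		/* Hypothetical helper. */
		/* Queue ourselves, lend priority, and context switch. */
		turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
	}
}
#endif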
  799 
  800 /*
  801  * Pick the highest priority thread on this turnstile and put it on the
  802  * pending list.  This must be called with the turnstile chain locked.
  803  */
  804 int
  805 turnstile_signal(struct turnstile *ts, int queue)
  806 {
  807         struct turnstile_chain *tc __unused;
  808         struct thread *td;
  809         int empty;
  810 
  811         MPASS(ts != NULL);
  812         mtx_assert(&ts->ts_lock, MA_OWNED);
  813         MPASS(curthread->td_proc->p_magic == P_MAGIC);
  814         MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
  815         MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
  816 
  817         /*
  818          * Pick the highest priority thread blocked on this lock and
  819          * move it to the pending list.
  820          */
  821         td = TAILQ_FIRST(&ts->ts_blocked[queue]);
  822         MPASS(td->td_proc->p_magic == P_MAGIC);
  823         mtx_lock_spin(&td_contested_lock);
  824         TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
  825         mtx_unlock_spin(&td_contested_lock);
  826         TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq);
  827 
  828         /*
  829          * If the turnstile is now empty, remove it from its chain and
  830          * give it to the about-to-be-woken thread.  Otherwise take a
  831          * turnstile from the free list and give it to the thread.
  832          */
  833         empty = TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
  834             TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]);
  835         if (empty) {
  836                 tc = TC_LOOKUP(ts->ts_lockobj);
  837                 mtx_assert(&tc->tc_lock, MA_OWNED);
  838                 MPASS(LIST_EMPTY(&ts->ts_free));
  839 #ifdef TURNSTILE_PROFILING
  840                 tc->tc_depth--;
  841 #endif
  842         } else
  843                 ts = LIST_FIRST(&ts->ts_free);
  844         MPASS(ts != NULL);
  845         LIST_REMOVE(ts, ts_hash);
  846         td->td_turnstile = ts;
  847 
  848         return (empty);
  849 }
  850         
  851 /*
  852  * Put all blocked threads on the pending list.  This must be called with
  853  * the turnstile chain locked.
  854  */
  855 void
  856 turnstile_broadcast(struct turnstile *ts, int queue)
  857 {
  858         struct turnstile_chain *tc __unused;
  859         struct turnstile *ts1;
  860         struct thread *td;
  861 
  862         MPASS(ts != NULL);
  863         mtx_assert(&ts->ts_lock, MA_OWNED);
  864         MPASS(curthread->td_proc->p_magic == P_MAGIC);
  865         MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
  866         /*
  867          * We must have the chain locked so that we can remove the empty
  868          * turnstile from the hash queue.
  869          */
  870         tc = TC_LOOKUP(ts->ts_lockobj);
  871         mtx_assert(&tc->tc_lock, MA_OWNED);
  872         MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
  873 
  874         /*
  875          * Transfer the blocked list to the pending list.
  876          */
  877         mtx_lock_spin(&td_contested_lock);
  878         TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked[queue], td_lockq);
  879         mtx_unlock_spin(&td_contested_lock);
  880 
  881         /*
  882          * Give a turnstile to each thread.  The last thread gets
  883          * this turnstile if the turnstile is empty.
  884          */
  885         TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) {
  886                 if (LIST_EMPTY(&ts->ts_free)) {
  887                         MPASS(TAILQ_NEXT(td, td_lockq) == NULL);
  888                         ts1 = ts;
  889 #ifdef TURNSTILE_PROFILING
  890                         tc->tc_depth--;
  891 #endif
  892                 } else
  893                         ts1 = LIST_FIRST(&ts->ts_free);
  894                 MPASS(ts1 != NULL);
  895                 LIST_REMOVE(ts1, ts_hash);
  896                 td->td_turnstile = ts1;
  897         }
  898 }
  899 
  900 /*
  901  * Wakeup all threads on the pending list and adjust the priority of the
  902  * current thread appropriately.  This must be called with the turnstile
  903  * chain locked.
  904  */
  905 void
  906 turnstile_unpend(struct turnstile *ts)
  907 {
  908         TAILQ_HEAD( ,thread) pending_threads;
  909         struct turnstile *nts;
  910         struct thread *td;
  911         u_char cp, pri;
  912 
  913         MPASS(ts != NULL);
  914         mtx_assert(&ts->ts_lock, MA_OWNED);
  915         MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
  916         MPASS(!TAILQ_EMPTY(&ts->ts_pending));
  917 
  918         /*
  919          * Move the list of pending threads out of the turnstile and
  920          * into a local variable.
  921          */
  922         TAILQ_INIT(&pending_threads);
  923         TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq);
  924 #ifdef INVARIANTS
  925         if (TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
  926             TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]))
  927                 ts->ts_lockobj = NULL;
  928 #endif
  929         /*
  930          * Adjust the priority of curthread based on other contested
  931          * locks it owns.  Don't lower the priority below the base
  932          * priority however.
  933          */
  934         td = curthread;
  935         pri = PRI_MAX;
  936         thread_lock(td);
  937         mtx_lock_spin(&td_contested_lock);
  938         /*
  939          * Remove the turnstile from this thread's list of contested locks
  940          * since this thread doesn't own it anymore.  New threads will
  941          * not be blocking on the turnstile until it is claimed by a new
  942          * owner.  There might not be a current owner if this is a shared
  943          * lock.
  944          */
  945         if (ts->ts_owner != NULL) {
  946                 ts->ts_owner = NULL;
  947                 LIST_REMOVE(ts, ts_link);
  948         }
  949         LIST_FOREACH(nts, &td->td_contested, ts_link) {
  950                 cp = turnstile_first_waiter(nts)->td_priority;
  951                 if (cp < pri)
  952                         pri = cp;
  953         }
  954         mtx_unlock_spin(&td_contested_lock);
  955         sched_unlend_prio(td, pri);
  956         thread_unlock(td);
  957         /*
  958          * Wake up all the pending threads.  If a thread is not blocked
  959          * on a lock, then it is currently executing on another CPU in
  960          * turnstile_wait() or sitting on a run queue waiting to resume
  961          * in turnstile_wait().  Set a flag to force it to try to acquire
  962          * the lock again instead of blocking.
  963          */
  964         while (!TAILQ_EMPTY(&pending_threads)) {
  965                 td = TAILQ_FIRST(&pending_threads);
  966                 TAILQ_REMOVE(&pending_threads, td, td_lockq);
  967                 SDT_PROBE2(sched, , , wakeup, td, td->td_proc);
  968                 thread_lock(td);
  969                 THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
  970                 MPASS(td->td_proc->p_magic == P_MAGIC);
  971                 MPASS(TD_ON_LOCK(td));
  972                 TD_CLR_LOCK(td);
  973                 MPASS(TD_CAN_RUN(td));
  974                 td->td_blocked = NULL;
  975                 td->td_lockname = NULL;
  976                 td->td_blktick = 0;
  977 #ifdef INVARIANTS
  978                 td->td_tsqueue = 0xff;
  979 #endif
  980                 sched_add(td, SRQ_BORING);
  981                 thread_unlock(td);
  982         }
  983         mtx_unlock_spin(&ts->ts_lock);
  984 }
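
/*
 * Editorial sketch (not in the original file): the matching release
 * path for the hypothetical lock above.  Waiters are moved to the
 * pending list while the chain is locked, the lock is marked free, and
 * only then does turnstile_unpend() hand the pending threads to the
 * scheduler.
 */
#if 0
static void
my_unlock_hard(struct my_lock *m)
{
	struct turnstile *ts;

	turnstile_chain_lock(&m->lock_object);
	ts = turnstile_lookup(&m->lock_object);
	MPASS(ts != NULL);	/* Taking the slow path implies waiters. */
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	my_mark_released(m);	/* Hypothetical: make the lock available. */
	turnstile_unpend(ts);
	turnstile_chain_unlock(&m->lock_object);
}
#endif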
  985 
  986 /*
  987  * Give up ownership of a turnstile.  This must be called with the
  988  * turnstile chain locked.
  989  */
  990 void
  991 turnstile_disown(struct turnstile *ts)
  992 {
  993         struct thread *td;
  994         u_char cp, pri;
  995 
  996         MPASS(ts != NULL);
  997         mtx_assert(&ts->ts_lock, MA_OWNED);
  998         MPASS(ts->ts_owner == curthread);
  999         MPASS(TAILQ_EMPTY(&ts->ts_pending));
 1000         MPASS(!TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) ||
 1001             !TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));
 1002 
 1003         /*
 1004          * Remove the turnstile from this thread's list of contested locks
 1005          * since this thread doesn't own it anymore.  New threads will
 1006          * not be blocking on the turnstile until it is claimed by a new
 1007          * owner.
 1008          */
 1009         mtx_lock_spin(&td_contested_lock);
 1010         ts->ts_owner = NULL;
 1011         LIST_REMOVE(ts, ts_link);
 1012         mtx_unlock_spin(&td_contested_lock);
 1013 
 1014         /*
 1015          * Adjust the priority of curthread based on other contested
 1016          * locks it owns.  Don't lower the priority below the base
 1017          * priority however.
 1018          */
 1019         td = curthread;
 1020         pri = PRI_MAX;
 1021         thread_lock(td);
 1022         mtx_unlock_spin(&ts->ts_lock);
 1023         mtx_lock_spin(&td_contested_lock);
 1024         LIST_FOREACH(ts, &td->td_contested, ts_link) {
 1025                 cp = turnstile_first_waiter(ts)->td_priority;
 1026                 if (cp < pri)
 1027                         pri = cp;
 1028         }
 1029         mtx_unlock_spin(&td_contested_lock);
 1030         sched_unlend_prio(td, pri);
 1031         thread_unlock(td);
 1032 }
 1033 
 1034 /*
 1035  * Return the first thread in a turnstile.
 1036  */
 1037 struct thread *
 1038 turnstile_head(struct turnstile *ts, int queue)
 1039 {
 1040 #ifdef INVARIANTS
 1041 
 1042         MPASS(ts != NULL);
 1043         MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
 1044         mtx_assert(&ts->ts_lock, MA_OWNED);
 1045 #endif
 1046         return (TAILQ_FIRST(&ts->ts_blocked[queue]));
 1047 }
 1048 
 1049 /*
 1050  * Returns true if a sub-queue of a turnstile is empty.
 1051  */
 1052 int
 1053 turnstile_empty(struct turnstile *ts, int queue)
 1054 {
 1055 #ifdef INVARIANTS
 1056 
 1057         MPASS(ts != NULL);
 1058         MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
 1059         mtx_assert(&ts->ts_lock, MA_OWNED);
 1060 #endif
 1061         return (TAILQ_EMPTY(&ts->ts_blocked[queue]));
 1062 }
 1063 
 1064 #ifdef DDB
 1065 static void
 1066 print_thread(struct thread *td, const char *prefix)
 1067 {
 1068 
 1069         db_printf("%s%p (tid %d, pid %d, \"%s\")\n", prefix, td, td->td_tid,
 1070             td->td_proc->p_pid, td->td_name);
 1071 }
 1072 
 1073 static void
 1074 print_queue(struct threadqueue *queue, const char *header, const char *prefix)
 1075 {
 1076         struct thread *td;
 1077 
 1078         db_printf("%s:\n", header);
 1079         if (TAILQ_EMPTY(queue)) {
 1080                 db_printf("%sempty\n", prefix);
 1081                 return;
 1082         }
 1083         TAILQ_FOREACH(td, queue, td_lockq) {
 1084                 print_thread(td, prefix);
 1085         }
 1086 }
 1087 
 1088 DB_SHOW_COMMAND(turnstile, db_show_turnstile)
 1089 {
 1090         struct turnstile_chain *tc;
 1091         struct turnstile *ts;
 1092         struct lock_object *lock;
 1093         int i;
 1094 
 1095         if (!have_addr)
 1096                 return;
 1097 
 1098         /*
 1099          * First, see if there is an active turnstile for the lock indicated
 1100          * by the address.
 1101          */
 1102         lock = (struct lock_object *)addr;
 1103         tc = TC_LOOKUP(lock);
 1104         LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
 1105                 if (ts->ts_lockobj == lock)
 1106                         goto found;
 1107 
 1108         /*
 1109          * Second, see if there is an active turnstile at the address
 1110          * indicated.
 1111          */
 1112         for (i = 0; i < TC_TABLESIZE; i++)
 1113                 LIST_FOREACH(ts, &turnstile_chains[i].tc_turnstiles, ts_hash) {
 1114                         if (ts == (struct turnstile *)addr)
 1115                                 goto found;
 1116                 }
 1117 
 1118         db_printf("Unable to locate a turnstile via %p\n", (void *)addr);
 1119         return;
 1120 found:
 1121         lock = ts->ts_lockobj;
 1122         db_printf("Lock: %p - (%s) %s\n", lock, LOCK_CLASS(lock)->lc_name,
 1123             lock->lo_name);
 1124         if (ts->ts_owner)
 1125                 print_thread(ts->ts_owner, "Lock Owner: ");
 1126         else
 1127                 db_printf("Lock Owner: none\n");
 1128         print_queue(&ts->ts_blocked[TS_SHARED_QUEUE], "Shared Waiters", "\t");
 1129         print_queue(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE], "Exclusive Waiters",
 1130             "\t");
 1131         print_queue(&ts->ts_pending, "Pending Threads", "\t");
 1132         
 1133 }
 1134 
 1135 /*
 1136  * Show the chain of threads that a particular thread is waiting on,
 1137  * considering only non-spin locks.
 1138  */
 1139 static void
 1140 print_lockchain(struct thread *td, const char *prefix)
 1141 {
 1142         struct lock_object *lock;
 1143         struct lock_class *class;
 1144         struct turnstile *ts;
 1145         struct thread *owner;
 1146 
 1147         /*
 1148          * Follow the chain.  We keep walking as long as the thread is
 1149          * blocked on a lock that has an owner.
 1150          */
 1151         while (!db_pager_quit) {
 1152                 db_printf("%sthread %d (pid %d, %s) ", prefix, td->td_tid,
 1153                     td->td_proc->p_pid, td->td_name);
 1154                 switch (td->td_state) {
 1155                 case TDS_INACTIVE:
 1156                         db_printf("is inactive\n");
 1157                         return;
 1158                 case TDS_CAN_RUN:
 1159                         db_printf("can run\n");
 1160                         return;
 1161                 case TDS_RUNQ:
 1162                         db_printf("is on a run queue\n");
 1163                         return;
 1164                 case TDS_RUNNING:
 1165                         db_printf("running on CPU %d\n", td->td_oncpu);
 1166                         return;
 1167                 case TDS_INHIBITED:
 1168                         if (TD_ON_LOCK(td)) {
 1169                                 ts = td->td_blocked;
 1170                                 lock = ts->ts_lockobj;
 1171                                 class = LOCK_CLASS(lock);
 1172                                 db_printf("blocked on lock %p (%s) \"%s\"\n",
 1173                                     lock, class->lc_name, lock->lo_name);
 1174                                 if (ts->ts_owner == NULL)
 1175                                         return;
 1176                                 td = ts->ts_owner;
 1177                                 break;
 1178                         } else if (TD_ON_SLEEPQ(td)) {
 1179                                 if (!lockmgr_chain(td, &owner) &&
 1180                                     !sx_chain(td, &owner)) {
 1181                                         db_printf("sleeping on %p \"%s\"\n",
 1182                                             td->td_wchan, td->td_wmesg);
 1183                                         return;
 1184                                 }
 1185                                 if (owner == NULL)
 1186                                         return;
 1187                                 td = owner;
 1188                                 break;
 1189                         }
 1190                         db_printf("inhibited\n");
 1191                         return;
 1192                 default:
 1193                         db_printf("??? (%#x)\n", td->td_state);
 1194                         return;
 1195                 }
 1196         }
 1197 }
 1198 
 1199 DB_SHOW_COMMAND(lockchain, db_show_lockchain)
 1200 {
 1201         struct thread *td;
 1202 
 1203         /* Figure out which thread to start with. */
 1204         if (have_addr)
 1205                 td = db_lookup_thread(addr, true);
 1206         else
 1207                 td = kdb_thread;
 1208 
 1209         print_lockchain(td, "");
 1210 }
 1211 DB_SHOW_ALIAS(sleepchain, db_show_lockchain);
 1212 
 1213 DB_SHOW_ALL_COMMAND(chains, db_show_allchains)
 1214 {
 1215         struct thread *td;
 1216         struct proc *p;
 1217         int i;
 1218 
 1219         i = 1;
 1220         FOREACH_PROC_IN_SYSTEM(p) {
 1221                 FOREACH_THREAD_IN_PROC(p, td) {
 1222                         if ((TD_ON_LOCK(td) && LIST_EMPTY(&td->td_contested))
 1223                             || (TD_IS_INHIBITED(td) && TD_ON_SLEEPQ(td))) {
 1224                                 db_printf("chain %d:\n", i++);
 1225                                 print_lockchain(td, " ");
 1226                         }
 1227                         if (db_pager_quit)
 1228                                 return;
 1229                 }
 1230         }
 1231 }
 1232 DB_SHOW_ALIAS(allchains, db_show_allchains)
 1233 
 1234 static void     print_waiters(struct turnstile *ts, int indent);
 1235         
 1236 static void
 1237 print_waiter(struct thread *td, int indent)
 1238 {
 1239         struct turnstile *ts;
 1240         int i;
 1241 
 1242         if (db_pager_quit)
 1243                 return;
 1244         for (i = 0; i < indent; i++)
 1245                 db_printf(" ");
 1246         print_thread(td, "thread ");
 1247         LIST_FOREACH(ts, &td->td_contested, ts_link)
 1248                 print_waiters(ts, indent + 1);
 1249 }
 1250 
 1251 static void
 1252 print_waiters(struct turnstile *ts, int indent)
 1253 {
 1254         struct lock_object *lock;
 1255         struct lock_class *class;
 1256         struct thread *td;
 1257         int i;
 1258 
 1259         if (db_pager_quit)
 1260                 return;
 1261         lock = ts->ts_lockobj;
 1262         class = LOCK_CLASS(lock);
 1263         for (i = 0; i < indent; i++)
 1264                 db_printf(" ");
 1265         db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name, lock->lo_name);
 1266         TAILQ_FOREACH(td, &ts->ts_blocked[TS_EXCLUSIVE_QUEUE], td_lockq)
 1267                 print_waiter(td, indent + 1);
 1268         TAILQ_FOREACH(td, &ts->ts_blocked[TS_SHARED_QUEUE], td_lockq)
 1269                 print_waiter(td, indent + 1);
 1270         TAILQ_FOREACH(td, &ts->ts_pending, td_lockq)
 1271                 print_waiter(td, indent + 1);
 1272 }
 1273 
 1274 DB_SHOW_COMMAND(locktree, db_show_locktree)
 1275 {
 1276         struct lock_object *lock;
 1277         struct lock_class *class;
 1278         struct turnstile_chain *tc;
 1279         struct turnstile *ts;
 1280 
 1281         if (!have_addr)
 1282                 return;
 1283         lock = (struct lock_object *)addr;
 1284         tc = TC_LOOKUP(lock);
 1285         LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
 1286                 if (ts->ts_lockobj == lock)
 1287                         break;
 1288         if (ts == NULL) {
 1289                 class = LOCK_CLASS(lock);
 1290                 db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name,
 1291                     lock->lo_name);
 1292         } else
 1293                 print_waiters(ts, 0);
 1294 }
 1295 #endif
