FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_turnstile.c

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *      and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of turnstiles used to hold the queue of threads blocked on
 * non-sleepable locks.  Sleepable locks use condition variables to
 * implement their queues.  Turnstiles differ from a sleep queue in that
 * turnstile queues are assigned to a lock held by an owning thread.  Thus,
 * when one thread is enqueued onto a turnstile, it can lend its priority
 * to the owning thread.
 *
 * We wish to avoid bloating locks with an embedded turnstile and we do not
 * want to use back-pointers in the locks for the same reason.  Thus, we
 * use a similar approach to that of Solaris 7 as described in Solaris
 * Internals by Jim Mauro and Richard McDougall.  Turnstiles are looked up
 * in a hash table based on the address of the lock.  Each entry in the
 * hash table is a linked list of turnstiles and is called a turnstile
 * chain.  Each chain contains a spin mutex that protects all of the
 * turnstiles in the chain.
 *
 * Each time a thread is created, a turnstile is allocated from a UMA zone
 * and attached to that thread.  When a thread blocks on a lock, if it is the
 * first thread to block, it lends its turnstile to the lock.  If the lock
 * already has a turnstile, then it gives its turnstile to the lock's
 * turnstile's free list.  When a thread is woken up, it takes a turnstile from
 * the free list if there are any other waiters.  If it is the only thread
 * blocked on the lock, then it reclaims the turnstile associated with the lock
 * and removes it from the hash table.
 */
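
/*
 * A worked example of the scheme above (illustrative only, not code from
 * this file): suppose threads A, B, and C block on mutex M in that order.
 * A is the first waiter, so A's turnstile becomes M's turnstile and is
 * inserted into the hash chain; B and C donate their turnstiles to that
 * turnstile's free list.  Each thread woken while others still wait
 * leaves with a turnstile taken from the free list; the thread that
 * empties the queue reclaims the turnstile attached to M and removes it
 * from the hash table.
 */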

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_turnstile_profiling.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#include <sys/lockmgr.h>
#include <sys/sx.h>
#endif

/*
 * Constants for the hash table of turnstile chains.  TC_SHIFT is a magic
 * number chosen because the sleep queues use the same value for the
 * shift.  Basically, we ignore the lower 8 bits of the address.
 * TC_TABLESIZE must be a power of two for TC_MASK to work properly.
 */
#define TC_TABLESIZE    128                     /* Must be power of 2. */
#define TC_MASK         (TC_TABLESIZE - 1)
#define TC_SHIFT        8
#define TC_HASH(lock)   (((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK)
#define TC_LOOKUP(lock) &turnstile_chains[TC_HASH(lock)]
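
/*
 * For example (illustrative arithmetic only): a lock object at address
 * 0xdeadbe00 hashes to chain ((0xdeadbe00 >> 8) & 127) == 62.  All locks
 * whose addresses share bits 8-14 land on the same chain and are thus
 * serialized by that chain's spin mutex.
 */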

/*
 * There are three different lists of turnstiles as follows.  The list
 * connected by ts_link entries is a per-thread list of all the turnstiles
 * attached to locks that we own.  This is used to fix up our priority when
 * a lock is released.  The other two lists use the ts_hash entries.  The
 * first of these two is the turnstile chain list that a turnstile is on
 * when it is attached to a lock.  The second list to use ts_hash is the
 * free list hung off of a turnstile that is attached to a lock.
 *
 * Each turnstile contains three lists of threads.  The two ts_blocked lists
 * are linked lists of threads blocked on the turnstile's lock.  One list is
 * for exclusive waiters, and the other is for shared waiters.  The
 * ts_pending list is a linked list of threads previously awakened by
 * turnstile_signal() or turnstile_broadcast() that are waiting to be put on
 * the run queue.
 *
 * Locking key:
 *  c - turnstile chain lock
 *  q - td_contested lock
 */
struct turnstile {
        struct mtx ts_lock;                     /* Spin lock for self. */
        struct threadqueue ts_blocked[2];       /* (c + q) Blocked threads. */
        struct threadqueue ts_pending;          /* (c) Pending threads. */
        LIST_ENTRY(turnstile) ts_hash;          /* (c) Chain and free list. */
        LIST_ENTRY(turnstile) ts_link;          /* (q) Contested locks. */
        LIST_HEAD(, turnstile) ts_free;         /* (c) Free turnstiles. */
        struct lock_object *ts_lockobj;         /* (c) Lock we reference. */
        struct thread *ts_owner;                /* (c + q) Who owns the lock. */
};

struct turnstile_chain {
        LIST_HEAD(, turnstile) tc_turnstiles;   /* List of turnstiles. */
        struct mtx tc_lock;                     /* Spin lock for this chain. */
#ifdef TURNSTILE_PROFILING
        u_int   tc_depth;                       /* Length of tc_queues. */
        u_int   tc_max_depth;                   /* Max length of tc_queues. */
#endif
};

#ifdef TURNSTILE_PROFILING
u_int turnstile_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, turnstile, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "turnstile profiling");
static SYSCTL_NODE(_debug_turnstile, OID_AUTO, chains,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "turnstile chain stats");
SYSCTL_UINT(_debug_turnstile, OID_AUTO, max_depth, CTLFLAG_RD,
    &turnstile_max_depth, 0, "maximum depth achieved of a single chain");
#endif
static struct mtx td_contested_lock;
static struct turnstile_chain turnstile_chains[TC_TABLESIZE];
static uma_zone_t turnstile_zone;

/*
 * Prototypes for non-exported routines.
 */
static void     init_turnstile0(void *dummy);
#ifdef TURNSTILE_PROFILING
static void     init_turnstile_profiling(void *arg);
#endif
static void     propagate_priority(struct thread *td);
static int      turnstile_adjust_thread(struct turnstile *ts,
                    struct thread *td);
static struct thread *turnstile_first_waiter(struct turnstile *ts);
static void     turnstile_setowner(struct turnstile *ts, struct thread *owner);
#ifdef INVARIANTS
static void     turnstile_dtor(void *mem, int size, void *arg);
#endif
static int      turnstile_init(void *mem, int size, int flags);
static void     turnstile_fini(void *mem, int size);

SDT_PROVIDER_DECLARE(sched);
SDT_PROBE_DEFINE(sched, , , sleep);
SDT_PROBE_DEFINE2(sched, , , wakeup, "struct thread *",
    "struct proc *");

static inline void
propagate_unlock_ts(struct turnstile *top, struct turnstile *ts)
{

        if (ts != top)
                mtx_unlock_spin(&ts->ts_lock);
}

static inline void
propagate_unlock_td(struct turnstile *top, struct thread *td)
{

        if (td->td_lock != &top->ts_lock)
                thread_unlock(td);
}

/*
 * Walks the chain of turnstiles and their owners to propagate the priority
 * of the thread being blocked to all the threads holding locks that have to
 * release their locks before this thread can run again.
 */
static void
propagate_priority(struct thread *td)
{
        struct turnstile *ts, *top;
        int pri;

        THREAD_LOCK_ASSERT(td, MA_OWNED);
        pri = td->td_priority;
        top = ts = td->td_blocked;
        THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);

        /*
         * The original turnstile lock is held across the entire
         * operation.  We only ever lock down the chain so the lock
         * order is constant.
         */
        for (;;) {
                td = ts->ts_owner;

                if (td == NULL) {
                        /*
                         * This might be a read lock with no owner.  There's
                         * not much we can do, so just bail.
                         */
                        propagate_unlock_ts(top, ts);
                        return;
                }

                /*
                 * Wait for the thread lock to be stable and then only
                 * acquire if it is not the turnstile lock.
                 */
                thread_lock_block_wait(td);
                if (td->td_lock != &ts->ts_lock) {
                        thread_lock_flags(td, MTX_DUPOK);
                        propagate_unlock_ts(top, ts);
                }
                MPASS(td->td_proc != NULL);
                MPASS(td->td_proc->p_magic == P_MAGIC);

                /*
                 * If the thread is asleep, then we are probably about
                 * to deadlock.  To make debugging this easier, show
                 * backtrace of misbehaving thread and panic to not
                 * leave the kernel deadlocked.
                 */
                if (TD_IS_SLEEPING(td)) {
                        printf(
                "Sleeping thread (tid %d, pid %d) owns a non-sleepable lock\n",
                            td->td_tid, td->td_proc->p_pid);
                        kdb_backtrace_thread(td);
                        panic("sleeping thread");
                }

                /*
                 * If this thread already has higher priority than the
                 * thread that is being blocked, we are finished.
                 */
                if (td->td_priority <= pri) {
                        propagate_unlock_td(top, td);
                        return;
                }

                /*
                 * Bump this thread's priority.
                 */
                sched_lend_prio(td, pri);

                /*
                 * If lock holder is actually running or on the run queue
                 * then we are done.
                 */
                if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td)) {
                        MPASS(td->td_blocked == NULL);
                        propagate_unlock_td(top, td);
                        return;
                }

#ifndef SMP
                /*
                 * For UP, we check to see if td is curthread (this should
                 * never happen, as it would mean we are deadlocked).
                 */
                KASSERT(td != curthread, ("Deadlock detected"));
#endif

                /*
                 * If we aren't blocked on a lock, we should be.
                 */
                KASSERT(TD_ON_LOCK(td), (
                    "thread %d(%s):%d holds %s but isn't blocked on a lock\n",
                    td->td_tid, td->td_name, td->td_state,
                    ts->ts_lockobj->lo_name));

                /*
                 * Pick up the lock that td is blocked on.
                 */
                ts = td->td_blocked;
                MPASS(ts != NULL);
                THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
                /* Resort td on the list if needed. */
                if (!turnstile_adjust_thread(ts, td)) {
                        propagate_unlock_ts(top, ts);
                        return;
                }
                /* The thread lock is released as ts lock above. */
        }
}
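
/*
 * A hypothetical chain, to make the loop above concrete: thread T1
 * (priority 100) blocks on mutex M1 owned by T2 (priority 120), which is
 * itself blocked on mutex M2 owned by T3 (priority 140).  Starting from
 * T1, the loop lends priority 100 to T2, follows T2's td_blocked to M2's
 * turnstile, and lends priority 100 to T3 as well.  (Lower numeric values
 * are more important priorities.)  The walk stops when it reaches an
 * owner that is running or on a run queue, an owner whose priority is
 * already numerically <= the blocked thread's, or a lock with no owner,
 * such as a read lock.
 */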

/*
 * Adjust the thread's position on a turnstile after its priority has been
 * changed.
 */
static int
turnstile_adjust_thread(struct turnstile *ts, struct thread *td)
{
        struct thread *td1, *td2;
        int queue;

        THREAD_LOCK_ASSERT(td, MA_OWNED);
        MPASS(TD_ON_LOCK(td));

        /*
         * This thread may not be blocked on this turnstile anymore
         * but instead might already be woken up on another CPU
         * that is waiting on the thread lock in turnstile_unpend() to
         * finish waking this thread up.  We can detect this case
         * by checking to see if this thread has been given a
         * turnstile by either turnstile_signal() or
         * turnstile_broadcast().  In this case, treat the thread as
         * if it was already running.
         */
        if (td->td_turnstile != NULL)
                return (0);

        /*
         * Check if the thread needs to be moved on the blocked chain.
         * It needs to be moved if either its priority is lower than
         * the previous thread or higher than the next thread.
         */
        THREAD_LOCKPTR_BLOCKED_ASSERT(td, &ts->ts_lock);
        td1 = TAILQ_PREV(td, threadqueue, td_lockq);
        td2 = TAILQ_NEXT(td, td_lockq);
        if ((td1 != NULL && td->td_priority < td1->td_priority) ||
            (td2 != NULL && td->td_priority > td2->td_priority)) {
                /*
                 * Remove thread from blocked chain and determine where
                 * it should be moved to.
                 */
                queue = td->td_tsqueue;
                MPASS(queue == TS_EXCLUSIVE_QUEUE || queue == TS_SHARED_QUEUE);
                mtx_lock_spin(&td_contested_lock);
                TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
                TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq) {
                        MPASS(td1->td_proc->p_magic == P_MAGIC);
                        if (td1->td_priority > td->td_priority)
                                break;
                }

                if (td1 == NULL)
                        TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
                else
                        TAILQ_INSERT_BEFORE(td1, td, td_lockq);
                mtx_unlock_spin(&td_contested_lock);
                if (td1 == NULL)
                        CTR3(KTR_LOCK,
                    "turnstile_adjust_thread: td %d put at tail on [%p] %s",
                            td->td_tid, ts->ts_lockobj, ts->ts_lockobj->lo_name);
                else
                        CTR4(KTR_LOCK,
                    "turnstile_adjust_thread: td %d moved before %d on [%p] %s",
                            td->td_tid, td1->td_tid, ts->ts_lockobj,
                            ts->ts_lockobj->lo_name);
        }
        return (1);
}
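
/*
 * Note that both ts_blocked queues are kept sorted in ascending order of
 * numeric priority: turnstile_wait() inserts new waiters in priority
 * order and turnstile_adjust_thread() re-sorts a waiter whose priority
 * changes, so TAILQ_FIRST() always yields the most important waiter.
 */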

/*
 * Early initialization of turnstiles.  This is not done via a SYSINIT()
 * since this needs to be initialized very early when mutexes are first
 * initialized.
 */
void
init_turnstiles(void)
{
        int i;

        for (i = 0; i < TC_TABLESIZE; i++) {
                LIST_INIT(&turnstile_chains[i].tc_turnstiles);
                mtx_init(&turnstile_chains[i].tc_lock, "turnstile chain",
                    NULL, MTX_SPIN);
        }
        mtx_init(&td_contested_lock, "td_contested", NULL, MTX_SPIN);
        LIST_INIT(&thread0.td_contested);
        thread0.td_turnstile = NULL;
}

#ifdef TURNSTILE_PROFILING
static void
init_turnstile_profiling(void *arg)
{
        struct sysctl_oid *chain_oid;
        char chain_name[10];
        int i;

        for (i = 0; i < TC_TABLESIZE; i++) {
                snprintf(chain_name, sizeof(chain_name), "%d", i);
                chain_oid = SYSCTL_ADD_NODE(NULL,
                    SYSCTL_STATIC_CHILDREN(_debug_turnstile_chains), OID_AUTO,
                    chain_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
                    "turnstile chain stats");
                SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
                    "depth", CTLFLAG_RD, &turnstile_chains[i].tc_depth, 0,
                    NULL);
                SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
                    "max_depth", CTLFLAG_RD, &turnstile_chains[i].tc_max_depth,
                    0, NULL);
        }
}
SYSINIT(turnstile_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_turnstile_profiling, NULL);
#endif

static void
init_turnstile0(void *dummy)
{

        turnstile_zone = uma_zcreate("TURNSTILE", sizeof(struct turnstile),
            NULL,
#ifdef INVARIANTS
            turnstile_dtor,
#else
            NULL,
#endif
            turnstile_init, turnstile_fini, UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
        thread0.td_turnstile = turnstile_alloc();
}
SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);

/*
 * Update a thread on the turnstile list after its priority has been changed.
 * The old priority is passed in as an argument.
 */
void
turnstile_adjust(struct thread *td, u_char oldpri)
{
        struct turnstile *ts;

        MPASS(TD_ON_LOCK(td));

        /*
         * Pick up the lock that td is blocked on.
         */
        ts = td->td_blocked;
        MPASS(ts != NULL);
        THREAD_LOCKPTR_BLOCKED_ASSERT(td, &ts->ts_lock);
        mtx_assert(&ts->ts_lock, MA_OWNED);

        /* Resort the turnstile on the list. */
        if (!turnstile_adjust_thread(ts, td))
                return;
        /*
         * If our priority was lowered and we are at the head of the
         * turnstile, then propagate our new priority up the chain.
         * Note that we currently don't try to revoke lent priorities
         * when our priority goes up.
         */
        MPASS(td->td_tsqueue == TS_EXCLUSIVE_QUEUE ||
            td->td_tsqueue == TS_SHARED_QUEUE);
        if (td == TAILQ_FIRST(&ts->ts_blocked[td->td_tsqueue]) &&
            td->td_priority < oldpri) {
                propagate_priority(td);
        }
}

/*
 * Set the owner of the lock this turnstile is attached to.
 */
static void
turnstile_setowner(struct turnstile *ts, struct thread *owner)
{

        mtx_assert(&td_contested_lock, MA_OWNED);
        MPASS(ts->ts_owner == NULL);

        /* A shared lock might not have an owner. */
        if (owner == NULL)
                return;

        MPASS(owner->td_proc->p_magic == P_MAGIC);
        ts->ts_owner = owner;
        LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
}

#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
turnstile_dtor(void *mem, int size, void *arg)
{
        struct turnstile *ts;

        ts = mem;
        MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]));
        MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));
        MPASS(TAILQ_EMPTY(&ts->ts_pending));
}
#endif

/*
 * UMA zone item initializer.
 */
static int
turnstile_init(void *mem, int size, int flags)
{
        struct turnstile *ts;

        bzero(mem, size);
        ts = mem;
        TAILQ_INIT(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
        TAILQ_INIT(&ts->ts_blocked[TS_SHARED_QUEUE]);
        TAILQ_INIT(&ts->ts_pending);
        LIST_INIT(&ts->ts_free);
        mtx_init(&ts->ts_lock, "turnstile lock", NULL, MTX_SPIN);
        return (0);
}

static void
turnstile_fini(void *mem, int size)
{
        struct turnstile *ts;

        ts = mem;
        mtx_destroy(&ts->ts_lock);
}

/*
 * Get a turnstile for a new thread.
 */
struct turnstile *
turnstile_alloc(void)
{

        return (uma_zalloc(turnstile_zone, M_WAITOK));
}

/*
 * Free a turnstile when a thread is destroyed.
 */
void
turnstile_free(struct turnstile *ts)
{

        uma_zfree(turnstile_zone, ts);
}

/*
 * Lock the turnstile chain associated with the specified lock.
 */
void
turnstile_chain_lock(struct lock_object *lock)
{
        struct turnstile_chain *tc;

        tc = TC_LOOKUP(lock);
        mtx_lock_spin(&tc->tc_lock);
}

struct turnstile *
turnstile_trywait(struct lock_object *lock)
{
        struct turnstile_chain *tc;
        struct turnstile *ts;

        tc = TC_LOOKUP(lock);
        mtx_lock_spin(&tc->tc_lock);
        LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
                if (ts->ts_lockobj == lock) {
                        mtx_lock_spin(&ts->ts_lock);
                        return (ts);
                }

        ts = curthread->td_turnstile;
        MPASS(ts != NULL);
        mtx_lock_spin(&ts->ts_lock);
        KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
        ts->ts_lockobj = lock;

        return (ts);
}

bool
turnstile_lock(struct turnstile *ts, struct lock_object **lockp,
    struct thread **tdp)
{
        struct turnstile_chain *tc;
        struct lock_object *lock;

        if ((lock = ts->ts_lockobj) == NULL)
                return (false);
        tc = TC_LOOKUP(lock);
        mtx_lock_spin(&tc->tc_lock);
        mtx_lock_spin(&ts->ts_lock);
        if (__predict_false(lock != ts->ts_lockobj)) {
                mtx_unlock_spin(&tc->tc_lock);
                mtx_unlock_spin(&ts->ts_lock);
                return (false);
        }
        *lockp = lock;
        *tdp = ts->ts_owner;
        return (true);
}

void
turnstile_unlock(struct turnstile *ts, struct lock_object *lock)
{
        struct turnstile_chain *tc;

        mtx_assert(&ts->ts_lock, MA_OWNED);
        mtx_unlock_spin(&ts->ts_lock);
        if (ts == curthread->td_turnstile)
                ts->ts_lockobj = NULL;
        tc = TC_LOOKUP(lock);
        mtx_unlock_spin(&tc->tc_lock);
}

void
turnstile_assert(struct turnstile *ts)
{
        MPASS(ts->ts_lockobj == NULL);
}

void
turnstile_cancel(struct turnstile *ts)
{
        struct turnstile_chain *tc;
        struct lock_object *lock;

        mtx_assert(&ts->ts_lock, MA_OWNED);

        mtx_unlock_spin(&ts->ts_lock);
        lock = ts->ts_lockobj;
        if (ts == curthread->td_turnstile)
                ts->ts_lockobj = NULL;
        tc = TC_LOOKUP(lock);
        mtx_unlock_spin(&tc->tc_lock);
}

/*
 * Look up the turnstile for a lock in the hash table locking the associated
 * turnstile chain along the way.  If no turnstile is found in the hash
 * table, NULL is returned.
 */
struct turnstile *
turnstile_lookup(struct lock_object *lock)
{
        struct turnstile_chain *tc;
        struct turnstile *ts;

        tc = TC_LOOKUP(lock);
        mtx_assert(&tc->tc_lock, MA_OWNED);
        LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
                if (ts->ts_lockobj == lock) {
                        mtx_lock_spin(&ts->ts_lock);
                        return (ts);
                }
        return (NULL);
}

/*
 * Unlock the turnstile chain associated with a given lock.
 */
void
turnstile_chain_unlock(struct lock_object *lock)
{
        struct turnstile_chain *tc;

        tc = TC_LOOKUP(lock);
        mtx_unlock_spin(&tc->tc_lock);
}

/*
 * Return a pointer to the thread waiting on this turnstile with the
 * most important priority or NULL if the turnstile has no waiters.
 */
static struct thread *
turnstile_first_waiter(struct turnstile *ts)
{
        struct thread *std, *xtd;

        std = TAILQ_FIRST(&ts->ts_blocked[TS_SHARED_QUEUE]);
        xtd = TAILQ_FIRST(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
        if (xtd == NULL || (std != NULL && std->td_priority < xtd->td_priority))
                return (std);
        return (xtd);
}

/*
 * Take ownership of a turnstile and adjust the priority of the new
 * owner appropriately.
 */
void
turnstile_claim(struct turnstile *ts)
{
        struct thread *td, *owner;
        struct turnstile_chain *tc;

        mtx_assert(&ts->ts_lock, MA_OWNED);
        MPASS(ts != curthread->td_turnstile);

        owner = curthread;
        mtx_lock_spin(&td_contested_lock);
        turnstile_setowner(ts, owner);
        mtx_unlock_spin(&td_contested_lock);

        td = turnstile_first_waiter(ts);
        MPASS(td != NULL);
        MPASS(td->td_proc->p_magic == P_MAGIC);
        THREAD_LOCKPTR_BLOCKED_ASSERT(td, &ts->ts_lock);

        /*
         * Update the priority of the new owner if needed.
         */
        thread_lock(owner);
        if (td->td_priority < owner->td_priority)
                sched_lend_prio(owner, td->td_priority);
        thread_unlock(owner);
        tc = TC_LOOKUP(ts->ts_lockobj);
        mtx_unlock_spin(&ts->ts_lock);
        mtx_unlock_spin(&tc->tc_lock);
}

/*
 * Block the current thread on the turnstile associated with 'lock'.  This
 * function will context switch and not return until this thread has been
 * woken back up.  This function must be called with the appropriate
 * turnstile chain locked and will return with it unlocked.
 */
void
turnstile_wait(struct turnstile *ts, struct thread *owner, int queue)
{
        struct turnstile_chain *tc;
        struct thread *td, *td1;
        struct lock_object *lock;

        td = curthread;
        mtx_assert(&ts->ts_lock, MA_OWNED);
        if (owner)
                MPASS(owner->td_proc->p_magic == P_MAGIC);
        MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

        /*
         * If the lock does not already have a turnstile, use this thread's
         * turnstile.  Otherwise insert the current thread into the
         * turnstile already in use by this lock.
         */
        tc = TC_LOOKUP(ts->ts_lockobj);
        mtx_assert(&tc->tc_lock, MA_OWNED);
        if (ts == td->td_turnstile) {
#ifdef TURNSTILE_PROFILING
                tc->tc_depth++;
                if (tc->tc_depth > tc->tc_max_depth) {
                        tc->tc_max_depth = tc->tc_depth;
                        if (tc->tc_max_depth > turnstile_max_depth)
                                turnstile_max_depth = tc->tc_max_depth;
                }
#endif
                LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash);
                KASSERT(TAILQ_EMPTY(&ts->ts_pending),
                    ("thread's turnstile has pending threads"));
                KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]),
                    ("thread's turnstile has exclusive waiters"));
                KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]),
                    ("thread's turnstile has shared waiters"));
                KASSERT(LIST_EMPTY(&ts->ts_free),
                    ("thread's turnstile has a non-empty free list"));
                MPASS(ts->ts_lockobj != NULL);
                mtx_lock_spin(&td_contested_lock);
                TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
                turnstile_setowner(ts, owner);
                mtx_unlock_spin(&td_contested_lock);
        } else {
                TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq)
                        if (td1->td_priority > td->td_priority)
                                break;
                mtx_lock_spin(&td_contested_lock);
                if (td1 != NULL)
                        TAILQ_INSERT_BEFORE(td1, td, td_lockq);
                else
                        TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
                MPASS(owner == ts->ts_owner);
                mtx_unlock_spin(&td_contested_lock);
                MPASS(td->td_turnstile != NULL);
                LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
        }
        thread_lock(td);
        thread_lock_set(td, &ts->ts_lock);
        td->td_turnstile = NULL;

        /* Save who we are blocked on and switch. */
        lock = ts->ts_lockobj;
        td->td_tsqueue = queue;
        td->td_blocked = ts;
        td->td_lockname = lock->lo_name;
        td->td_blktick = ticks;
        TD_SET_LOCK(td);
        mtx_unlock_spin(&tc->tc_lock);
        propagate_priority(td);

        if (LOCK_LOG_TEST(lock, 0))
                CTR4(KTR_LOCK, "%s: td %d blocked on [%p] %s", __func__,
                    td->td_tid, lock, lock->lo_name);

        SDT_PROBE0(sched, , , sleep);

        THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
        mi_switch(SW_VOL | SWT_TURNSTILE);

        if (LOCK_LOG_TEST(lock, 0))
                CTR4(KTR_LOCK, "%s: td %d free from blocked on [%p] %s",
                    __func__, td->td_tid, lock, lock->lo_name);
}
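
/*
 * A sketch of how a lock implementation typically drives the blocking
 * path above (illustrative only; 'lo' is the lock's struct lock_object
 * and lock_still_contested() is a hypothetical stand-in for the caller's
 * own re-check under the chain lock -- the real callers live in the
 * mutex and rwlock code):
 *
 *      ts = turnstile_trywait(lo);
 *      if (lock_still_contested(lo))
 *              turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
 *      else
 *              turnstile_cancel(ts);
 */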

/*
 * Pick the highest priority thread on this turnstile and put it on the
 * pending list.  This must be called with the turnstile chain locked.
 */
int
turnstile_signal(struct turnstile *ts, int queue)
{
        struct turnstile_chain *tc __unused;
        struct thread *td;
        int empty;

        MPASS(ts != NULL);
        mtx_assert(&ts->ts_lock, MA_OWNED);
        MPASS(curthread->td_proc->p_magic == P_MAGIC);
        MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
        MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

        /*
         * Pick the highest priority thread blocked on this lock and
         * move it to the pending list.
         */
        td = TAILQ_FIRST(&ts->ts_blocked[queue]);
        MPASS(td->td_proc->p_magic == P_MAGIC);
        mtx_lock_spin(&td_contested_lock);
        TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
        mtx_unlock_spin(&td_contested_lock);
        TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq);

        /*
         * If the turnstile is now empty, remove it from its chain and
         * give it to the about-to-be-woken thread.  Otherwise take a
         * turnstile from the free list and give it to the thread.
         */
        empty = TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
            TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]);
        if (empty) {
                tc = TC_LOOKUP(ts->ts_lockobj);
                mtx_assert(&tc->tc_lock, MA_OWNED);
                MPASS(LIST_EMPTY(&ts->ts_free));
#ifdef TURNSTILE_PROFILING
                tc->tc_depth--;
#endif
        } else
                ts = LIST_FIRST(&ts->ts_free);
        MPASS(ts != NULL);
        LIST_REMOVE(ts, ts_hash);
        td->td_turnstile = ts;

        return (empty);
}

/*
 * Put all blocked threads on the pending list.  This must be called with
 * the turnstile chain locked.
 */
void
turnstile_broadcast(struct turnstile *ts, int queue)
{
        struct turnstile_chain *tc __unused;
        struct turnstile *ts1;
        struct thread *td;

        MPASS(ts != NULL);
        mtx_assert(&ts->ts_lock, MA_OWNED);
        MPASS(curthread->td_proc->p_magic == P_MAGIC);
        MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
        /*
         * We must have the chain locked so that we can remove the empty
         * turnstile from the hash queue.
         */
        tc = TC_LOOKUP(ts->ts_lockobj);
        mtx_assert(&tc->tc_lock, MA_OWNED);
        MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

        /*
         * Transfer the blocked list to the pending list.
         */
        mtx_lock_spin(&td_contested_lock);
        TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked[queue], td_lockq);
        mtx_unlock_spin(&td_contested_lock);

        /*
         * Give a turnstile to each thread.  The last thread gets
         * this turnstile if the turnstile is empty.
         */
        TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) {
                if (LIST_EMPTY(&ts->ts_free)) {
                        MPASS(TAILQ_NEXT(td, td_lockq) == NULL);
                        ts1 = ts;
#ifdef TURNSTILE_PROFILING
                        tc->tc_depth--;
#endif
                } else
                        ts1 = LIST_FIRST(&ts->ts_free);
                MPASS(ts1 != NULL);
                LIST_REMOVE(ts1, ts_hash);
                td->td_turnstile = ts1;
        }
}
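
/*
 * A sketch of a typical unlock path pairing turnstile_signal() or
 * turnstile_broadcast() with turnstile_unpend() (illustrative only; 'lo'
 * is the lock's struct lock_object, and the real callers live in the
 * lock implementations):
 *
 *      turnstile_chain_lock(lo);
 *      ts = turnstile_lookup(lo);
 *      if (ts != NULL) {
 *              turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
 *              turnstile_unpend(ts);
 *      }
 *      turnstile_chain_unlock(lo);
 */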

static u_char
turnstile_calc_unlend_prio_locked(struct thread *td)
{
        struct turnstile *nts;
        u_char cp, pri;

        THREAD_LOCK_ASSERT(td, MA_OWNED);
        mtx_assert(&td_contested_lock, MA_OWNED);

        pri = PRI_MAX;
        LIST_FOREACH(nts, &td->td_contested, ts_link) {
                cp = turnstile_first_waiter(nts)->td_priority;
                if (cp < pri)
                        pri = cp;
        }
        return (pri);
}
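
/*
 * For example (illustrative values): if the thread still owns two other
 * contested locks whose most important waiters have priorities 90 and
 * 110, the loop above returns 90, so the callers below can keep the
 * thread lent up to 90 rather than dropping it all the way back to its
 * base priority.
 */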

/*
 * Wakeup all threads on the pending list and adjust the priority of the
 * current thread appropriately.  This must be called with the turnstile
 * chain locked.
 */
void
turnstile_unpend(struct turnstile *ts)
{
        TAILQ_HEAD( ,thread) pending_threads;
        struct thread *td;
        u_char pri;

        MPASS(ts != NULL);
        mtx_assert(&ts->ts_lock, MA_OWNED);
        MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
        MPASS(!TAILQ_EMPTY(&ts->ts_pending));

        /*
         * Move the list of pending threads out of the turnstile and
         * into a local variable.
         */
        TAILQ_INIT(&pending_threads);
        TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq);
#ifdef INVARIANTS
        if (TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
            TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]))
                ts->ts_lockobj = NULL;
#endif
        /*
         * Adjust the priority of curthread based on other contested
         * locks it owns.  Don't lower the priority below the base
         * priority however.
         */
        td = curthread;
        thread_lock(td);
        mtx_lock_spin(&td_contested_lock);
        /*
         * Remove the turnstile from this thread's list of contested locks
         * since this thread doesn't own it anymore.  New threads will
         * not be blocking on the turnstile until it is claimed by a new
         * owner.  There might not be a current owner if this is a shared
         * lock.
         */
        if (ts->ts_owner != NULL) {
                ts->ts_owner = NULL;
                LIST_REMOVE(ts, ts_link);
        }
        pri = turnstile_calc_unlend_prio_locked(td);
        mtx_unlock_spin(&td_contested_lock);
        sched_unlend_prio(td, pri);
        thread_unlock(td);
        /*
         * Wake up all the pending threads.  If a thread is not blocked
         * on a lock, then it is currently executing on another CPU in
         * turnstile_wait() or sitting on a run queue waiting to resume
         * in turnstile_wait().  Set a flag to force it to try to acquire
         * the lock again instead of blocking.
         */
        while (!TAILQ_EMPTY(&pending_threads)) {
                td = TAILQ_FIRST(&pending_threads);
                TAILQ_REMOVE(&pending_threads, td, td_lockq);
                SDT_PROBE2(sched, , , wakeup, td, td->td_proc);
                thread_lock_block_wait(td);
                THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
                MPASS(td->td_proc->p_magic == P_MAGIC);
                MPASS(TD_ON_LOCK(td));
                TD_CLR_LOCK(td);
                MPASS(TD_CAN_RUN(td));
                td->td_blocked = NULL;
                td->td_lockname = NULL;
                td->td_blktick = 0;
#ifdef INVARIANTS
                td->td_tsqueue = 0xff;
#endif
                sched_add(td, SRQ_HOLD | SRQ_BORING);
        }
        mtx_unlock_spin(&ts->ts_lock);
}

/*
 * Give up ownership of a turnstile.  This must be called with the
 * turnstile chain locked.
 */
void
turnstile_disown(struct turnstile *ts)
{
        struct thread *td;
        u_char pri;

        MPASS(ts != NULL);
        mtx_assert(&ts->ts_lock, MA_OWNED);
        MPASS(ts->ts_owner == curthread);
        MPASS(TAILQ_EMPTY(&ts->ts_pending));
        MPASS(!TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) ||
            !TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));

        /*
         * Remove the turnstile from this thread's list of contested locks
         * since this thread doesn't own it anymore.  New threads will
         * not be blocking on the turnstile until it is claimed by a new
         * owner.
         */
        mtx_lock_spin(&td_contested_lock);
        ts->ts_owner = NULL;
        LIST_REMOVE(ts, ts_link);
        mtx_unlock_spin(&td_contested_lock);

        /*
         * Adjust the priority of curthread based on other contested
         * locks it owns.  Don't lower the priority below the base
         * priority however.
         */
        td = curthread;
        thread_lock(td);
        mtx_unlock_spin(&ts->ts_lock);
        mtx_lock_spin(&td_contested_lock);
        pri = turnstile_calc_unlend_prio_locked(td);
        mtx_unlock_spin(&td_contested_lock);
        sched_unlend_prio(td, pri);
        thread_unlock(td);
}

/*
 * Return the first thread in a turnstile.
 */
struct thread *
turnstile_head(struct turnstile *ts, int queue)
{
#ifdef INVARIANTS

        MPASS(ts != NULL);
        MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
        mtx_assert(&ts->ts_lock, MA_OWNED);
#endif
        return (TAILQ_FIRST(&ts->ts_blocked[queue]));
}

/*
 * Returns true if a sub-queue of a turnstile is empty.
 */
int
turnstile_empty(struct turnstile *ts, int queue)
{
#ifdef INVARIANTS

        MPASS(ts != NULL);
        MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
        mtx_assert(&ts->ts_lock, MA_OWNED);
#endif
        return (TAILQ_EMPTY(&ts->ts_blocked[queue]));
}

#ifdef DDB
static void
print_thread(struct thread *td, const char *prefix)
{

        db_printf("%s%p (tid %d, pid %d, \"%s\")\n", prefix, td, td->td_tid,
            td->td_proc->p_pid, td->td_name);
}

static void
print_queue(struct threadqueue *queue, const char *header, const char *prefix)
{
        struct thread *td;

        db_printf("%s:\n", header);
        if (TAILQ_EMPTY(queue)) {
                db_printf("%sempty\n", prefix);
                return;
        }
        TAILQ_FOREACH(td, queue, td_lockq) {
                print_thread(td, prefix);
        }
}

DB_SHOW_COMMAND(turnstile, db_show_turnstile)
{
        struct turnstile_chain *tc;
        struct turnstile *ts;
        struct lock_object *lock;
        int i;

        if (!have_addr)
                return;

        /*
         * First, see if there is an active turnstile for the lock indicated
         * by the address.
         */
        lock = (struct lock_object *)addr;
        tc = TC_LOOKUP(lock);
        LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
                if (ts->ts_lockobj == lock)
                        goto found;

        /*
         * Second, see if there is an active turnstile at the address
         * indicated.
         */
        for (i = 0; i < TC_TABLESIZE; i++)
                LIST_FOREACH(ts, &turnstile_chains[i].tc_turnstiles, ts_hash) {
                        if (ts == (struct turnstile *)addr)
                                goto found;
                }

        db_printf("Unable to locate a turnstile via %p\n", (void *)addr);
        return;
found:
        lock = ts->ts_lockobj;
        db_printf("Lock: %p - (%s) %s\n", lock, LOCK_CLASS(lock)->lc_name,
            lock->lo_name);
        if (ts->ts_owner)
                print_thread(ts->ts_owner, "Lock Owner: ");
        else
                db_printf("Lock Owner: none\n");
        print_queue(&ts->ts_blocked[TS_SHARED_QUEUE], "Shared Waiters", "\t");
        print_queue(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE], "Exclusive Waiters",
            "\t");
        print_queue(&ts->ts_pending, "Pending Threads", "\t");
}

/*
 * Show all the threads a particular thread is waiting on based on
 * non-spin locks.
 */
static void
print_lockchain(struct thread *td, const char *prefix)
{
        struct lock_object *lock;
        struct lock_class *class;
        struct turnstile *ts;
        struct thread *owner;

        /*
         * Follow the chain.  We keep walking as long as the thread is
         * blocked on a lock that has an owner.
         */
        while (!db_pager_quit) {
                if (td == (void *)LK_KERNPROC) {
                        db_printf("%sdisowned (LK_KERNPROC)\n", prefix);
                        return;
                }
                db_printf("%sthread %d (pid %d, %s) is ", prefix, td->td_tid,
                    td->td_proc->p_pid, td->td_name);
                switch (td->td_state) {
                case TDS_INACTIVE:
                        db_printf("inactive\n");
                        return;
                case TDS_CAN_RUN:
                        db_printf("runnable\n");
                        return;
                case TDS_RUNQ:
                        db_printf("on a run queue\n");
                        return;
                case TDS_RUNNING:
                        db_printf("running on CPU %d\n", td->td_oncpu);
                        return;
                case TDS_INHIBITED:
                        if (TD_ON_LOCK(td)) {
                                ts = td->td_blocked;
                                lock = ts->ts_lockobj;
                                class = LOCK_CLASS(lock);
                                db_printf("blocked on lock %p (%s) \"%s\"\n",
                                    lock, class->lc_name, lock->lo_name);
                                if (ts->ts_owner == NULL)
                                        return;
                                td = ts->ts_owner;
                                break;
                        } else if (TD_ON_SLEEPQ(td)) {
                                if (!lockmgr_chain(td, &owner) &&
                                    !sx_chain(td, &owner)) {
                                        db_printf("sleeping on %p \"%s\"\n",
                                            td->td_wchan, td->td_wmesg);
                                        return;
                                }
                                if (owner == NULL)
                                        return;
                                td = owner;
                                break;
                        }
                        db_printf("inhibited: %s\n", KTDSTATE(td));
                        return;
                default:
                        db_printf("??? (%#x)\n", td->td_state);
                        return;
                }
        }
}

DB_SHOW_COMMAND(lockchain, db_show_lockchain)
{
        struct thread *td;

        /* Figure out which thread to start with. */
        if (have_addr)
                td = db_lookup_thread(addr, true);
        else
                td = kdb_thread;

        print_lockchain(td, "");
}
DB_SHOW_ALIAS(sleepchain, db_show_lockchain);

DB_SHOW_ALL_COMMAND(chains, db_show_allchains)
{
        struct thread *td;
        struct proc *p;
        int i;

        i = 1;
        FOREACH_PROC_IN_SYSTEM(p) {
                FOREACH_THREAD_IN_PROC(p, td) {
                        if ((TD_ON_LOCK(td) && LIST_EMPTY(&td->td_contested))
                            || (TD_IS_INHIBITED(td) && TD_ON_SLEEPQ(td))) {
                                db_printf("chain %d:\n", i++);
                                print_lockchain(td, " ");
                        }
                        if (db_pager_quit)
                                return;
                }
        }
}
DB_SHOW_ALIAS(allchains, db_show_allchains)
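
/*
 * The DB_SHOW_* entries in this file are reached from the ddb prompt,
 * e.g. (illustrative): "show turnstile <lock address>" dumps a single
 * turnstile, "show lockchain <thread>" follows the chain of lock owners
 * starting from a thread (defaulting to the current kdb thread), and
 * "show allchains" prints every blocked chain in the system.
 */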

static void     print_waiters(struct turnstile *ts, int indent);

static void
print_waiter(struct thread *td, int indent)
{
        struct turnstile *ts;
        int i;

        if (db_pager_quit)
                return;
        for (i = 0; i < indent; i++)
                db_printf(" ");
        print_thread(td, "thread ");
        LIST_FOREACH(ts, &td->td_contested, ts_link)
                print_waiters(ts, indent + 1);
}

static void
print_waiters(struct turnstile *ts, int indent)
{
        struct lock_object *lock;
        struct lock_class *class;
        struct thread *td;
        int i;

        if (db_pager_quit)
                return;
        lock = ts->ts_lockobj;
        class = LOCK_CLASS(lock);
        for (i = 0; i < indent; i++)
                db_printf(" ");
        db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name, lock->lo_name);
        TAILQ_FOREACH(td, &ts->ts_blocked[TS_EXCLUSIVE_QUEUE], td_lockq)
                print_waiter(td, indent + 1);
        TAILQ_FOREACH(td, &ts->ts_blocked[TS_SHARED_QUEUE], td_lockq)
                print_waiter(td, indent + 1);
        TAILQ_FOREACH(td, &ts->ts_pending, td_lockq)
                print_waiter(td, indent + 1);
}

DB_SHOW_COMMAND(locktree, db_show_locktree)
{
        struct lock_object *lock;
        struct lock_class *class;
        struct turnstile_chain *tc;
        struct turnstile *ts;

        if (!have_addr)
                return;
        lock = (struct lock_object *)addr;
        tc = TC_LOOKUP(lock);
        LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
                if (ts->ts_lockobj == lock)
                        break;
        if (ts == NULL) {
                class = LOCK_CLASS(lock);
                db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name,
                    lock->lo_name);
        } else
                print_waiters(ts, 0);
}
#endif
