
FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_witness.c


    1 /*-
    2  * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
    3  *
    4  * Redistribution and use in source and binary forms, with or without
    5  * modification, are permitted provided that the following conditions
    6  * are met:
    7  * 1. Redistributions of source code must retain the above copyright
    8  *    notice, this list of conditions and the following disclaimer.
    9  * 2. Redistributions in binary form must reproduce the above copyright
   10  *    notice, this list of conditions and the following disclaimer in the
   11  *    documentation and/or other materials provided with the distribution.
   12  * 3. Berkeley Software Design Inc's name may not be used to endorse or
   13  *    promote products derived from this software without specific prior
   14  *    written permission.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
   20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  *
   28  *      from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
   29  *      and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
   30  * $FreeBSD: releng/5.1/sys/kern/subr_witness.c 115425 2003-05-31 06:42:37Z peter $
   31  */
   32 
   33 /*
   34  * Implementation of the `witness' lock verifier.  Originally implemented for
   35  * mutexes in BSD/OS.  Extended to handle generic lock objects and lock
   36  * classes in FreeBSD.
   37  */
   38 
   39 /*
   40  *      Main Entry: witness
   41  *      Pronunciation: 'wit-n&s
   42  *      Function: noun
   43  *      Etymology: Middle English witnesse, from Old English witnes knowledge,
   44  *          testimony, witness, from 2wit
   45  *      Date: before 12th century
   46  *      1 : attestation of a fact or event : TESTIMONY
   47  *      2 : one that gives evidence; specifically : one who testifies in
   48  *          a cause or before a judicial tribunal
   49  *      3 : one asked to be present at a transaction so as to be able to
   50  *          testify to its having taken place
   51  *      4 : one who has personal knowledge of something
   52  *      5 a : something serving as evidence or proof : SIGN
   53  *        b : public affirmation by word or example of usually
   54  *            religious faith or conviction <the heroic witness to divine
   55  *            life -- Pilot>
   56  *      6 capitalized : a member of the Jehovah's Witnesses 
   57  */
   58 
   59 /*
   60  * Special rules concerning Giant and lock orders:
   61  *
   62  * 1) Giant must be acquired before any other mutexes.  Stated another way,
   63  *    no other mutex may be held when Giant is acquired.
   64  *
   65  * 2) Giant must be released when blocking on a sleepable lock.
   66  *
   67  * This rule is less obvious, but is a result of Giant providing the same
   68  * semantics as spl().  Basically, when a thread sleeps, it must release
   69  * Giant.  When a thread blocks on a sleepable lock, it sleeps.  Hence rule
   70  * 2).
   71  *
   72  * 3) Giant may be acquired before or after sleepable locks.
   73  *
   74  * This rule is also not quite as obvious.  Giant may be acquired after
   75  * a sleepable lock because it is a non-sleepable lock and non-sleepable
   76  * locks may always be acquired while holding a sleepable lock.  The second
   77  * case, Giant before a sleepable lock, follows from rule 2) above.  Suppose
   78  * you have two threads T1 and T2 and a sleepable lock X.  Suppose that T1
   79  * acquires X and blocks on Giant.  Then suppose that T2 acquires Giant and
   80  * blocks on X.  When T2 blocks on X, T2 will release Giant allowing T1 to
   81  * execute.  Thus, acquiring Giant both before and after a sleepable lock
   82  * will not result in a lock order reversal.
   83  */
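       /*
        * Illustrative sketch (not part of the original source): under rules 2)
        * and 3) above, both of the following acquisition orders are accepted
        * by witness.  "example_sx" names a hypothetical sleepable sx lock.
        *
        *      Thread A                        Thread B
        *      sx_xlock(&example_sx);          mtx_lock(&Giant);
        *      mtx_lock(&Giant);               sx_xlock(&example_sx);
        *      ...                             ...
        *      mtx_unlock(&Giant);             sx_xunlock(&example_sx);
        *      sx_xunlock(&example_sx);        mtx_unlock(&Giant);
        *
        * If thread B blocks on example_sx, it first releases Giant (rule 2),
        * so it cannot deadlock against thread A.
        */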
   84 
   85 #include "opt_ddb.h"
   86 #include "opt_witness.h"
   87 #ifdef __i386__
   88 #include "opt_swtch.h"
   89 #endif
   90 
   91 #include <sys/param.h>
   92 #include <sys/bus.h>
   93 #include <sys/kernel.h>
   94 #include <sys/ktr.h>
   95 #include <sys/lock.h>
   96 #include <sys/malloc.h>
   97 #include <sys/mutex.h>
   98 #include <sys/proc.h>
   99 #include <sys/sysctl.h>
  100 #include <sys/systm.h>
  101 
  102 #include <ddb/ddb.h>
  103 
  104 #include <machine/stdarg.h>
  105 
  106 /* Define this to check for blessed mutexes */
  107 #undef BLESSING
  108 
  109 #define WITNESS_COUNT 200
  110 #define WITNESS_CHILDCOUNT (WITNESS_COUNT * 4)
  111 /*
  112  * XXX: This is somewhat bogus, as we assume here that at most 1024 threads
  113  * will hold LOCK_NCHILDREN * 2 locks.  We handle failure ok, and we should
  114  * probably be safe for the most part, but it's still a SWAG.
  115  */
   116 #define LOCK_CHILDCOUNT ((MAXCPU + 1024) * 2)
  117 
  118 #define WITNESS_NCHILDREN 6
  119 
  120 struct witness_child_list_entry;
  121 
  122 struct witness {
  123         const   char *w_name;
  124         struct  lock_class *w_class;
  125         STAILQ_ENTRY(witness) w_list;           /* List of all witnesses. */
  126         STAILQ_ENTRY(witness) w_typelist;       /* Witnesses of a type. */
  127         struct  witness_child_list_entry *w_children;   /* Great evilness... */
  128         const   char *w_file;
  129         int     w_line;
  130         u_int   w_level;
  131         u_int   w_refcount;
  132         u_char  w_Giant_squawked:1;
  133         u_char  w_other_squawked:1;
  134         u_char  w_same_squawked:1;
  135         u_char  w_displayed:1;
  136 };
  137 
  138 struct witness_child_list_entry {
  139         struct  witness_child_list_entry *wcl_next;
  140         struct  witness *wcl_children[WITNESS_NCHILDREN];
  141         u_int   wcl_count;
  142 };
  143 
  144 STAILQ_HEAD(witness_list, witness);
  145 
  146 #ifdef BLESSING
  147 struct witness_blessed {
  148         const   char *b_lock1;
  149         const   char *b_lock2;
  150 };
  151 #endif
  152 
  153 struct witness_order_list_entry {
  154         const   char *w_name;
  155         struct  lock_class *w_class;
  156 };
  157 
  158 #ifdef BLESSING
  159 static int      blessed(struct witness *, struct witness *);
  160 #endif
  161 static int      depart(struct witness *w);
  162 static struct   witness *enroll(const char *description,
  163                                 struct lock_class *lock_class);
  164 static int      insertchild(struct witness *parent, struct witness *child);
  165 static int      isitmychild(struct witness *parent, struct witness *child);
  166 static int      isitmydescendant(struct witness *parent, struct witness *child);
  167 static int      itismychild(struct witness *parent, struct witness *child);
  168 static int      rebalancetree(struct witness_list *list);
  169 static void     removechild(struct witness *parent, struct witness *child);
  170 static int      reparentchildren(struct witness *newparent,
  171                     struct witness *oldparent);
  172 static int      sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS);
  173 static void     witness_displaydescendants(void(*)(const char *fmt, ...),
  174                                            struct witness *, int indent);
  175 static const char *fixup_filename(const char *file);
  176 static void     witness_leveldescendents(struct witness *parent, int level);
  177 static void     witness_levelall(void);
  178 static struct   witness *witness_get(void);
  179 static void     witness_free(struct witness *m);
  180 static struct   witness_child_list_entry *witness_child_get(void);
  181 static void     witness_child_free(struct witness_child_list_entry *wcl);
  182 static struct   lock_list_entry *witness_lock_list_get(void);
  183 static void     witness_lock_list_free(struct lock_list_entry *lle);
  184 static struct   lock_instance *find_instance(struct lock_list_entry *lock_list,
  185                                              struct lock_object *lock);
  186 static void     witness_list_lock(struct lock_instance *instance);
  187 #ifdef DDB
  188 static void     witness_list(struct thread *td);
  189 static void     witness_display_list(void(*prnt)(const char *fmt, ...),
  190                                      struct witness_list *list);
  191 static void     witness_display(void(*)(const char *fmt, ...));
  192 #endif
  193 
  194 MALLOC_DEFINE(M_WITNESS, "witness", "witness structure");
  195 
  196 /*
  197  * If set to 0, witness is disabled.  If set to 1, witness performs full lock
  198  * order checking for all locks.  If set to 2 or higher, then witness skips
  199  * the full lock order check if the lock being acquired is at a higher level
  200  * (i.e. farther down in the tree) than the current lock.  This last mode is
  201  * somewhat experimental and not considered fully safe.  At runtime, this
   202  * value may be set to 0 to turn off witness.  witness cannot be turned
   203  * back on once it has been turned off, however.
  204  */
  205 static int witness_watch = 1;
  206 TUNABLE_INT("debug.witness_watch", &witness_watch);
  207 SYSCTL_PROC(_debug, OID_AUTO, witness_watch, CTLFLAG_RW | CTLTYPE_INT, NULL, 0,
  208     sysctl_debug_witness_watch, "I", "witness is watching lock operations");
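       /*
        * Example usage (a sketch, not part of the original source): witness
        * checking can be disabled at boot via the loader tunable registered
        * above, or at runtime via the sysctl.  Turning it back on is rejected
        * by sysctl_debug_witness_watch().
        *
        *      # echo 'debug.witness_watch="0"' >> /boot/loader.conf
        *      # sysctl debug.witness_watch=0
        */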
  209 
  210 #ifdef DDB
  211 /*
  212  * When DDB is enabled and witness_ddb is set to 1, it will cause the system to
   213  * drop into the kernel debugger when:
   214  *      - a lock hierarchy violation occurs
  215  *      - locks are held when going to sleep.
  216  */
  217 #ifdef WITNESS_DDB
  218 int     witness_ddb = 1;
  219 #else
  220 int     witness_ddb = 0;
  221 #endif
  222 TUNABLE_INT("debug.witness_ddb", &witness_ddb);
  223 SYSCTL_INT(_debug, OID_AUTO, witness_ddb, CTLFLAG_RW, &witness_ddb, 0, "");
  224 
  225 /*
  226  * When DDB is enabled and witness_trace is set to 1, it will cause the system
   227  * to print a stack trace when:
   228  *      - a lock hierarchy violation occurs
  229  *      - locks are held when going to sleep.
  230  */
  231 int     witness_trace = 1;
  232 TUNABLE_INT("debug.witness_trace", &witness_trace);
  233 SYSCTL_INT(_debug, OID_AUTO, witness_trace, CTLFLAG_RW, &witness_trace, 0, "");
  234 #endif /* DDB */
  235 
  236 #ifdef WITNESS_SKIPSPIN
  237 int     witness_skipspin = 1;
  238 #else
  239 int     witness_skipspin = 0;
  240 #endif
  241 TUNABLE_INT("debug.witness_skipspin", &witness_skipspin);
  242 SYSCTL_INT(_debug, OID_AUTO, witness_skipspin, CTLFLAG_RD, &witness_skipspin, 0,
  243     "");
  244 
  245 static struct mtx w_mtx;
  246 static struct witness_list w_free = STAILQ_HEAD_INITIALIZER(w_free);
  247 static struct witness_list w_all = STAILQ_HEAD_INITIALIZER(w_all);
  248 static struct witness_list w_spin = STAILQ_HEAD_INITIALIZER(w_spin);
  249 static struct witness_list w_sleep = STAILQ_HEAD_INITIALIZER(w_sleep);
  250 static struct witness_child_list_entry *w_child_free = NULL;
  251 static struct lock_list_entry *w_lock_list_free = NULL;
  252 
  253 static struct witness w_data[WITNESS_COUNT];
  254 static struct witness_child_list_entry w_childdata[WITNESS_CHILDCOUNT];
  255 static struct lock_list_entry w_locklistdata[LOCK_CHILDCOUNT];
  256 
  257 static struct witness_order_list_entry order_lists[] = {
  258         { "proctree", &lock_class_sx },
  259         { "allproc", &lock_class_sx },
  260         { "Giant", &lock_class_mtx_sleep },
  261         { "filedesc structure", &lock_class_mtx_sleep },
  262         { "pipe mutex", &lock_class_mtx_sleep },
  263         { "sigio lock", &lock_class_mtx_sleep },
  264         { "process group", &lock_class_mtx_sleep },
  265         { "process lock", &lock_class_mtx_sleep },
  266         { "session", &lock_class_mtx_sleep },
  267         { "uidinfo hash", &lock_class_mtx_sleep },
  268         { "uidinfo struct", &lock_class_mtx_sleep },
  269         { "allprison", &lock_class_mtx_sleep },
  270         { NULL, NULL },
  271         /*
  272          * spin locks
  273          */
  274 #ifdef SMP
  275         { "ap boot", &lock_class_mtx_spin },
  276 #ifdef __i386__
  277         { "com", &lock_class_mtx_spin },
  278 #endif
  279 #endif
  280         { "sio", &lock_class_mtx_spin },
  281 #ifdef __i386__
  282         { "cy", &lock_class_mtx_spin },
  283 #endif
  284         { "sabtty", &lock_class_mtx_spin },
  285         { "zstty", &lock_class_mtx_spin },
  286         { "ng_node", &lock_class_mtx_spin },
  287         { "ng_worklist", &lock_class_mtx_spin },
  288         { "ithread table lock", &lock_class_mtx_spin },
  289         { "sched lock", &lock_class_mtx_spin },
  290         { "callout", &lock_class_mtx_spin },
  291         /*
  292          * leaf locks
  293          */
  294         { "allpmaps", &lock_class_mtx_spin },
  295         { "vm page queue free mutex", &lock_class_mtx_spin },
  296         { "icu", &lock_class_mtx_spin },
  297 #ifdef SMP
  298         { "smp rendezvous", &lock_class_mtx_spin },
  299 #if defined(__i386__) && defined(APIC_IO)
  300         { "tlb", &lock_class_mtx_spin },
  301 #endif
  302 #if defined(__i386__) && defined(LAZY_SWITCH)
  303         { "lazypmap", &lock_class_mtx_spin },
  304 #endif
  305 #ifdef __sparc64__
  306         { "ipi", &lock_class_mtx_spin },
  307 #endif
  308 #endif
  309         { "clk", &lock_class_mtx_spin },
  310         { "mutex profiling lock", &lock_class_mtx_spin },
  311         { "kse zombie lock", &lock_class_mtx_spin },
  312         { "ALD Queue", &lock_class_mtx_spin },
  313 #ifdef __ia64__
  314         { "MCA spin lock", &lock_class_mtx_spin },
  315 #endif
  316 #if defined(__i386__) || defined(__amd64__)
  317         { "pcicfg", &lock_class_mtx_spin },
  318 #endif
  319         { NULL, NULL },
  320         { NULL, NULL }
  321 };
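       /*
        * Added commentary (not in the original source): witness_initialize()
        * walks the table above and makes each entry a child of the entry
        * before it, so each run of consecutive entries defines one chain of
        * "acquire earlier entries before later ones".  A { NULL, NULL } entry
        * terminates a chain, and the final pair of NULL entries ends the
        * table.
        */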
  322 
  323 #ifdef BLESSING
  324 /*
  325  * Pairs of locks which have been blessed
  326  * Don't complain about order problems with blessed locks
  327  */
  328 static struct witness_blessed blessed_list[] = {
  329 };
  330 static int blessed_count =
  331         sizeof(blessed_list) / sizeof(struct witness_blessed);
  332 #endif
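       /*
        * Hypothetical example (not part of the original source): with BLESSING
        * defined, an entry like the following would suppress lock order
        * complaints between the two named lock types; the names here are made
        * up.
        *
        *      static struct witness_blessed blessed_list[] = {
        *              { "example lock A", "example lock B" },
        *      };
        */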
  333 
  334 /*
  335  * List of all locks in the system.
  336  */
  337 TAILQ_HEAD(, lock_object) all_locks = TAILQ_HEAD_INITIALIZER(all_locks);
  338 
  339 static struct mtx all_mtx = {
  340         { &lock_class_mtx_sleep,        /* mtx_object.lo_class */
  341           "All locks list",             /* mtx_object.lo_name */
  342           "All locks list",             /* mtx_object.lo_type */
  343           LO_INITIALIZED,               /* mtx_object.lo_flags */
  344           { NULL, NULL },               /* mtx_object.lo_list */
  345           NULL },                       /* mtx_object.lo_witness */
  346         MTX_UNOWNED, 0,                 /* mtx_lock, mtx_recurse */
  347         TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
  348         { NULL, NULL }                  /* mtx_contested */
  349 };
  350 
  351 /*
  352  * This global is set to 0 once it becomes safe to use the witness code.
  353  */
  354 static int witness_cold = 1;
  355 
  356 /*
  357  * Global variables for book keeping.
  358  */
  359 static int lock_cur_cnt;
  360 static int lock_max_cnt;
  361 
  362 /*
  363  * The WITNESS-enabled diagnostic code.
  364  */
  365 static void
  366 witness_initialize(void *dummy __unused)
  367 {
  368         struct lock_object *lock;
  369         struct witness_order_list_entry *order;
  370         struct witness *w, *w1;
  371         int i;
  372 
  373         /*
  374          * We have to release Giant before initializing its witness
  375          * structure so that WITNESS doesn't get confused.
  376          */
  377         mtx_unlock(&Giant);
  378         mtx_assert(&Giant, MA_NOTOWNED);
  379 
  380         CTR1(KTR_WITNESS, "%s: initializing witness", __func__);
  381         TAILQ_INSERT_HEAD(&all_locks, &all_mtx.mtx_object, lo_list);
  382         mtx_init(&w_mtx, "witness lock", NULL, MTX_SPIN | MTX_QUIET |
  383             MTX_NOWITNESS);
  384         for (i = 0; i < WITNESS_COUNT; i++)
  385                 witness_free(&w_data[i]);
  386         for (i = 0; i < WITNESS_CHILDCOUNT; i++)
  387                 witness_child_free(&w_childdata[i]);
  388         for (i = 0; i < LOCK_CHILDCOUNT; i++)
  389                 witness_lock_list_free(&w_locklistdata[i]);
  390 
  391         /* First add in all the specified order lists. */
  392         for (order = order_lists; order->w_name != NULL; order++) {
  393                 w = enroll(order->w_name, order->w_class);
  394                 if (w == NULL)
  395                         continue;
  396                 w->w_file = "order list";
  397                 for (order++; order->w_name != NULL; order++) {
  398                         w1 = enroll(order->w_name, order->w_class);
  399                         if (w1 == NULL)
  400                                 continue;
  401                         w1->w_file = "order list";
  402                         if (!itismychild(w, w1))
  403                                 panic("Not enough memory for static orders!");
  404                         w = w1;
  405                 }
  406         }
  407 
  408         /* Iterate through all locks and add them to witness. */
  409         mtx_lock(&all_mtx);
  410         TAILQ_FOREACH(lock, &all_locks, lo_list) {
  411                 if (lock->lo_flags & LO_WITNESS)
  412                         lock->lo_witness = enroll(lock->lo_type,
  413                             lock->lo_class);
  414                 else
  415                         lock->lo_witness = NULL;
  416         }
  417         mtx_unlock(&all_mtx);
  418 
  419         /* Mark the witness code as being ready for use. */
  420         atomic_store_rel_int(&witness_cold, 0);
  421 
  422         mtx_lock(&Giant);
  423 }
  424 SYSINIT(witness_init, SI_SUB_WITNESS, SI_ORDER_FIRST, witness_initialize, NULL)
  425 
  426 static int
  427 sysctl_debug_witness_watch(SYSCTL_HANDLER_ARGS)
  428 {
  429         int error, value;
  430 
  431         value = witness_watch;
  432         error = sysctl_handle_int(oidp, &value, 0, req);
  433         if (error != 0 || req->newptr == NULL)
  434                 return (error);
  435         error = suser(req->td);
  436         if (error != 0)
  437                 return (error);
  438         if (value == witness_watch)
  439                 return (0);
  440         if (value != 0)
  441                 return (EINVAL);
  442         witness_watch = 0;
  443         return (0);
  444 }
  445 
  446 void
  447 witness_init(struct lock_object *lock)
  448 {
  449         struct lock_class *class;
  450 
  451         class = lock->lo_class;
  452         if (lock->lo_flags & LO_INITIALIZED)
  453                 panic("%s: lock (%s) %s is already initialized", __func__,
  454                     class->lc_name, lock->lo_name);
  455         if ((lock->lo_flags & LO_RECURSABLE) != 0 &&
  456             (class->lc_flags & LC_RECURSABLE) == 0)
  457                 panic("%s: lock (%s) %s can not be recursable", __func__,
  458                     class->lc_name, lock->lo_name);
  459         if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
  460             (class->lc_flags & LC_SLEEPABLE) == 0)
  461                 panic("%s: lock (%s) %s can not be sleepable", __func__,
  462                     class->lc_name, lock->lo_name);
  463         if ((lock->lo_flags & LO_UPGRADABLE) != 0 &&
  464             (class->lc_flags & LC_UPGRADABLE) == 0)
  465                 panic("%s: lock (%s) %s can not be upgradable", __func__,
  466                     class->lc_name, lock->lo_name);
  467 
  468         mtx_lock(&all_mtx);
  469         TAILQ_INSERT_TAIL(&all_locks, lock, lo_list);
  470         lock->lo_flags |= LO_INITIALIZED;
  471         lock_cur_cnt++;
  472         if (lock_cur_cnt > lock_max_cnt)
  473                 lock_max_cnt = lock_cur_cnt;
  474         mtx_unlock(&all_mtx);
  475         if (!witness_cold && witness_watch != 0 && panicstr == NULL &&
  476             (lock->lo_flags & LO_WITNESS) != 0)
  477                 lock->lo_witness = enroll(lock->lo_type, class);
  478         else
  479                 lock->lo_witness = NULL;
  480 }
  481 
  482 void
  483 witness_destroy(struct lock_object *lock)
  484 {
  485         struct witness *w;
  486 
  487         if (witness_cold)
  488                 panic("lock (%s) %s destroyed while witness_cold",
  489                     lock->lo_class->lc_name, lock->lo_name);
  490         if ((lock->lo_flags & LO_INITIALIZED) == 0)
  491                 panic("%s: lock (%s) %s is not initialized", __func__,
  492                     lock->lo_class->lc_name, lock->lo_name);
  493 
  494         /* XXX: need to verify that no one holds the lock */
  495         w = lock->lo_witness;
  496         if (w != NULL) {
  497                 mtx_lock_spin(&w_mtx);
  498                 MPASS(w->w_refcount > 0);
  499                 w->w_refcount--;
  500 
  501                 /*
  502                  * Lock is already released if we have an allocation failure
  503                  * and depart() fails.
  504                  */
  505                 if (w->w_refcount != 0 || depart(w))
  506                         mtx_unlock_spin(&w_mtx);
  507         }
  508 
  509         mtx_lock(&all_mtx);
  510         lock_cur_cnt--;
  511         TAILQ_REMOVE(&all_locks, lock, lo_list);
  512         lock->lo_flags &= ~LO_INITIALIZED;
  513         mtx_unlock(&all_mtx);
  514 }
  515 
  516 #ifdef DDB
  517 static void
  518 witness_display_list(void(*prnt)(const char *fmt, ...),
  519                      struct witness_list *list)
  520 {
  521         struct witness *w;
  522 
  523         STAILQ_FOREACH(w, list, w_typelist) {
  524                 if (w->w_file == NULL || w->w_level > 0)
  525                         continue;
  526                 /*
   527                  * This lock has no ancestors; display its descendants.
  528                  */
  529                 witness_displaydescendants(prnt, w, 0);
  530         }
  531 }
  532         
  533 static void
  534 witness_display(void(*prnt)(const char *fmt, ...))
  535 {
  536         struct witness *w;
  537 
  538         KASSERT(!witness_cold, ("%s: witness_cold", __func__));
  539         witness_levelall();
  540 
  541         /* Clear all the displayed flags. */
  542         STAILQ_FOREACH(w, &w_all, w_list) {
  543                 w->w_displayed = 0;
  544         }
  545 
  546         /*
  547          * First, handle sleep locks which have been acquired at least
  548          * once.
  549          */
  550         prnt("Sleep locks:\n");
  551         witness_display_list(prnt, &w_sleep);
  552         
  553         /*
  554          * Now do spin locks which have been acquired at least once.
  555          */
  556         prnt("\nSpin locks:\n");
  557         witness_display_list(prnt, &w_spin);
  558         
  559         /*
  560          * Finally, any locks which have not been acquired yet.
  561          */
  562         prnt("\nLocks which were never acquired:\n");
  563         STAILQ_FOREACH(w, &w_all, w_list) {
  564                 if (w->w_file != NULL || w->w_refcount == 0)
  565                         continue;
  566                 prnt("%s\n", w->w_name);
  567         }
  568 }
  569 #endif /* DDB */
  570 
  571 /* Trim useless garbage from filenames. */
  572 static const char *
  573 fixup_filename(const char *file)
  574 {
  575 
  576         if (file == NULL)
  577                 return (NULL);
  578         while (strncmp(file, "../", 3) == 0)
  579                 file += 3;
  580         return (file);
  581 }
  582 
  583 void
  584 witness_lock(struct lock_object *lock, int flags, const char *file, int line)
  585 {
  586         struct lock_list_entry **lock_list, *lle;
  587         struct lock_instance *lock1, *lock2;
  588         struct lock_class *class;
  589         struct witness *w, *w1;
  590         struct thread *td;
  591         int i, j;
  592 #ifdef DDB
  593         int go_into_ddb = 0;
  594 #endif
  595 
  596         if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
  597             panicstr != NULL)
  598                 return;
  599         w = lock->lo_witness;
  600         class = lock->lo_class;
  601         td = curthread;
  602         file = fixup_filename(file);
  603 
  604         if (class->lc_flags & LC_SLEEPLOCK) {
  605                 /*
   606          * Since spin locks include a critical section, this check
   607          * implicitly enforces a lock order of all sleep locks before
   608          * all spin locks.
  609                  */
  610                 if (td->td_critnest != 0 && (flags & LOP_TRYLOCK) == 0)
  611                         panic("blockable sleep lock (%s) %s @ %s:%d",
  612                             class->lc_name, lock->lo_name, file, line);
  613                 lock_list = &td->td_sleeplocks;
  614         } else
  615                 lock_list = PCPU_PTR(spinlocks);
  616 
  617         /*
  618          * Is this the first lock acquired?  If so, then no order checking
  619          * is needed.
  620          */
  621         if (*lock_list == NULL)
  622                 goto out;
  623 
  624         /*
  625          * Check to see if we are recursing on a lock we already own.
  626          */
  627         lock1 = find_instance(*lock_list, lock);
  628         if (lock1 != NULL) {
  629                 if ((lock1->li_flags & LI_EXCLUSIVE) != 0 &&
  630                     (flags & LOP_EXCLUSIVE) == 0) {
  631                         printf("shared lock of (%s) %s @ %s:%d\n",
  632                             class->lc_name, lock->lo_name, file, line);
  633                         printf("while exclusively locked from %s:%d\n",
  634                             lock1->li_file, lock1->li_line);
  635                         panic("share->excl");
  636                 }
  637                 if ((lock1->li_flags & LI_EXCLUSIVE) == 0 &&
  638                     (flags & LOP_EXCLUSIVE) != 0) {
  639                         printf("exclusive lock of (%s) %s @ %s:%d\n",
  640                             class->lc_name, lock->lo_name, file, line);
  641                         printf("while share locked from %s:%d\n",
  642                             lock1->li_file, lock1->li_line);
  643                         panic("excl->share");
  644                 }
  645                 lock1->li_flags++;
  646                 if ((lock->lo_flags & LO_RECURSABLE) == 0) {
  647                         printf(
  648                         "recursed on non-recursive lock (%s) %s @ %s:%d\n",
  649                             class->lc_name, lock->lo_name, file, line);
  650                         printf("first acquired @ %s:%d\n", lock1->li_file,
  651                             lock1->li_line);
  652                         panic("recurse");
  653                 }
  654                 CTR4(KTR_WITNESS, "%s: pid %d recursed on %s r=%d", __func__,
  655                     td->td_proc->p_pid, lock->lo_name,
  656                     lock1->li_flags & LI_RECURSEMASK);
  657                 lock1->li_file = file;
  658                 lock1->li_line = line;
  659                 return;
  660         }
  661 
  662         /*
  663          * Try locks do not block if they fail to acquire the lock, thus
  664          * there is no danger of deadlocks or of switching while holding a
  665          * spin lock if we acquire a lock via a try operation.
  666          */
  667         if (flags & LOP_TRYLOCK)
  668                 goto out;
  669 
  670         /*
  671          * Check for duplicate locks of the same type.  Note that we only
  672          * have to check for this on the last lock we just acquired.  Any
  673          * other cases will be caught as lock order violations.
  674          */
  675         lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
  676         w1 = lock1->li_lock->lo_witness;
  677         if (w1 == w) {
  678                 if (w->w_same_squawked || (lock->lo_flags & LO_DUPOK))
  679                         goto out;
  680                 w->w_same_squawked = 1;
  681                 printf("acquiring duplicate lock of same type: \"%s\"\n", 
  682                         lock->lo_type);
  683                 printf(" 1st %s @ %s:%d\n", lock1->li_lock->lo_name,
  684                     lock1->li_file, lock1->li_line);
  685                 printf(" 2nd %s @ %s:%d\n", lock->lo_name, file, line);
  686 #ifdef DDB
  687                 go_into_ddb = 1;
  688 #endif
  689                 goto out;
  690         }
  691         MPASS(!mtx_owned(&w_mtx));
  692         mtx_lock_spin(&w_mtx);
  693         /*
   694          * If we have a known higher number, just say ok.
  695          */
  696         if (witness_watch > 1 && w->w_level > w1->w_level) {
  697                 mtx_unlock_spin(&w_mtx);
  698                 goto out;
  699         }
  700         /*
   701          * If we know that the lock we are acquiring comes after
   702          * the lock we most recently acquired in the lock order tree,
   703          * then there is no need for any further checks.
  704          */
  705         if (isitmydescendant(w1, w)) {
  706                 mtx_unlock_spin(&w_mtx);
  707                 goto out;
  708         }
  709         for (j = 0, lle = *lock_list; lle != NULL; lle = lle->ll_next) {
  710                 for (i = lle->ll_count - 1; i >= 0; i--, j++) {
  711 
  712                         MPASS(j < WITNESS_COUNT);
  713                         lock1 = &lle->ll_children[i];
  714                         w1 = lock1->li_lock->lo_witness;
  715 
  716                         /*
  717                          * If this lock doesn't undergo witness checking,
  718                          * then skip it.
  719                          */
  720                         if (w1 == NULL) {
  721                                 KASSERT((lock1->li_lock->lo_flags & LO_WITNESS) == 0,
  722                                     ("lock missing witness structure"));
  723                                 continue;
  724                         }
  725                         /*
  726                          * If we are locking Giant and this is a sleepable
  727                          * lock, then skip it.
  728                          */
  729                         if ((lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0 &&
  730                             lock == &Giant.mtx_object)
  731                                 continue;
  732                         /*
  733                          * If we are locking a sleepable lock and this lock
  734                          * is Giant, then skip it.
  735                          */
  736                         if ((lock->lo_flags & LO_SLEEPABLE) != 0 &&
  737                             lock1->li_lock == &Giant.mtx_object)
  738                                 continue;
  739                         /*
  740                          * If we are locking a sleepable lock and this lock
  741                          * isn't sleepable, we want to treat it as a lock
   742                          * order violation to enforce a general lock order of
  743                          * sleepable locks before non-sleepable locks.
  744                          */
  745                         if (!((lock->lo_flags & LO_SLEEPABLE) != 0 &&
  746                             (lock1->li_lock->lo_flags & LO_SLEEPABLE) == 0))
  747                             /*
   748                              * Check the lock order hierarchy for a reversal.
  749                              */
  750                             if (!isitmydescendant(w, w1))
  751                                 continue;
  752                         /*
  753                          * We have a lock order violation, check to see if it
  754                          * is allowed or has already been yelled about.
  755                          */
  756                         mtx_unlock_spin(&w_mtx);
  757 #ifdef BLESSING
  758                         if (blessed(w, w1))
  759                                 goto out;
  760 #endif
  761                         if (lock1->li_lock == &Giant.mtx_object) {
  762                                 if (w1->w_Giant_squawked)
  763                                         goto out;
  764                                 else
  765                                         w1->w_Giant_squawked = 1;
  766                         } else {
  767                                 if (w1->w_other_squawked)
  768                                         goto out;
  769                                 else
  770                                         w1->w_other_squawked = 1;
  771                         }
  772                         /*
  773                          * Ok, yell about it.
  774                          */
  775                         printf("lock order reversal\n");
  776                         /*
  777                          * Try to locate an earlier lock with
  778                          * witness w in our list.
  779                          */
  780                         do {
  781                                 lock2 = &lle->ll_children[i];
  782                                 MPASS(lock2->li_lock != NULL);
  783                                 if (lock2->li_lock->lo_witness == w)
  784                                         break;
  785                                 i--;
  786                                 if (i == 0 && lle->ll_next != NULL) {
  787                                         lle = lle->ll_next;
  788                                         i = lle->ll_count - 1;
  789                                         MPASS(i >= 0 && i < LOCK_NCHILDREN);
  790                                 }
  791                         } while (i >= 0);
  792                         if (i < 0) {
  793                                 printf(" 1st %p %s (%s) @ %s:%d\n",
  794                                     lock1->li_lock, lock1->li_lock->lo_name,
  795                                     lock1->li_lock->lo_type, lock1->li_file,
  796                                     lock1->li_line);
  797                                 printf(" 2nd %p %s (%s) @ %s:%d\n", lock,
  798                                     lock->lo_name, lock->lo_type, file, line);
  799                         } else {
  800                                 printf(" 1st %p %s (%s) @ %s:%d\n",
  801                                     lock2->li_lock, lock2->li_lock->lo_name,
  802                                     lock2->li_lock->lo_type, lock2->li_file,
  803                                     lock2->li_line);
  804                                 printf(" 2nd %p %s (%s) @ %s:%d\n",
  805                                     lock1->li_lock, lock1->li_lock->lo_name,
  806                                     lock1->li_lock->lo_type, lock1->li_file,
  807                                     lock1->li_line);
  808                                 printf(" 3rd %p %s (%s) @ %s:%d\n", lock,
  809                                     lock->lo_name, lock->lo_type, file, line);
  810                         }
  811 #ifdef DDB
  812                         go_into_ddb = 1;
  813 #endif
  814                         goto out;
  815                 }
  816         }
  817         lock1 = &(*lock_list)->ll_children[(*lock_list)->ll_count - 1];
  818         /*
  819          * Don't build a new relationship between a sleepable lock and
  820          * Giant if it is the wrong direction.  The real lock order is that
  821          * sleepable locks come before Giant.
  822          */
  823         if (!(lock1->li_lock == &Giant.mtx_object &&
  824             (lock->lo_flags & LO_SLEEPABLE) != 0)) {
  825                 CTR3(KTR_WITNESS, "%s: adding %s as a child of %s", __func__,
  826                     lock->lo_type, lock1->li_lock->lo_type);
  827                 if (!itismychild(lock1->li_lock->lo_witness, w))
  828                         /* Witness is dead. */
  829                         return;
  830         } 
  831         mtx_unlock_spin(&w_mtx);
  832 
  833 out:
  834 #ifdef DDB
  835         if (go_into_ddb) {
  836                 if (witness_trace)
  837                         backtrace();
  838                 if (witness_ddb)
  839                         Debugger(__func__);
  840         }
  841 #endif
  842         w->w_file = file;
  843         w->w_line = line;
  844         
  845         lle = *lock_list;
  846         if (lle == NULL || lle->ll_count == LOCK_NCHILDREN) {
  847                 lle = witness_lock_list_get();
  848                 if (lle == NULL)
  849                         return;
  850                 lle->ll_next = *lock_list;
  851                 CTR3(KTR_WITNESS, "%s: pid %d added lle %p", __func__,
  852                     td->td_proc->p_pid, lle);
  853                 *lock_list = lle;
  854         }
  855         lock1 = &lle->ll_children[lle->ll_count++];
  856         lock1->li_lock = lock;
  857         lock1->li_line = line;
  858         lock1->li_file = file;
  859         if ((flags & LOP_EXCLUSIVE) != 0)
  860                 lock1->li_flags = LI_EXCLUSIVE;
  861         else
  862                 lock1->li_flags = 0;
  863         CTR4(KTR_WITNESS, "%s: pid %d added %s as lle[%d]", __func__,
  864             td->td_proc->p_pid, lock->lo_name, lle->ll_count - 1);
  865 }
  866 
  867 void
  868 witness_upgrade(struct lock_object *lock, int flags, const char *file, int line)
  869 {
  870         struct lock_instance *instance;
  871         struct lock_class *class;
  872 
  873         KASSERT(!witness_cold, ("%s: witness_cold", __func__));
  874         if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
  875                 return;
  876         class = lock->lo_class;
  877         file = fixup_filename(file);
  878         if ((lock->lo_flags & LO_UPGRADABLE) == 0)
  879                 panic("upgrade of non-upgradable lock (%s) %s @ %s:%d",
  880                     class->lc_name, lock->lo_name, file, line);
  881         if ((flags & LOP_TRYLOCK) == 0)
  882                 panic("non-try upgrade of lock (%s) %s @ %s:%d", class->lc_name,
  883                     lock->lo_name, file, line);
  884         if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
  885                 panic("upgrade of non-sleep lock (%s) %s @ %s:%d",
  886                     class->lc_name, lock->lo_name, file, line);
  887         instance = find_instance(curthread->td_sleeplocks, lock);
  888         if (instance == NULL)
  889                 panic("upgrade of unlocked lock (%s) %s @ %s:%d",
  890                     class->lc_name, lock->lo_name, file, line);
  891         if ((instance->li_flags & LI_EXCLUSIVE) != 0)
  892                 panic("upgrade of exclusive lock (%s) %s @ %s:%d",
  893                     class->lc_name, lock->lo_name, file, line);
  894         if ((instance->li_flags & LI_RECURSEMASK) != 0)
  895                 panic("upgrade of recursed lock (%s) %s r=%d @ %s:%d",
  896                     class->lc_name, lock->lo_name,
  897                     instance->li_flags & LI_RECURSEMASK, file, line);
  898         instance->li_flags |= LI_EXCLUSIVE;
  899 }
  900 
  901 void
  902 witness_downgrade(struct lock_object *lock, int flags, const char *file,
  903     int line)
  904 {
  905         struct lock_instance *instance;
  906         struct lock_class *class;
  907 
  908         KASSERT(!witness_cold, ("%s: witness_cold", __func__));
  909         if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
  910                 return;
  911         class = lock->lo_class;
  912         file = fixup_filename(file);
  913         if ((lock->lo_flags & LO_UPGRADABLE) == 0)
  914                 panic("downgrade of non-upgradable lock (%s) %s @ %s:%d",
  915                     class->lc_name, lock->lo_name, file, line);
  916         if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
  917                 panic("downgrade of non-sleep lock (%s) %s @ %s:%d",
  918                     class->lc_name, lock->lo_name, file, line);
  919         instance = find_instance(curthread->td_sleeplocks, lock);
  920         if (instance == NULL)
  921                 panic("downgrade of unlocked lock (%s) %s @ %s:%d",
  922                     class->lc_name, lock->lo_name, file, line);
  923         if ((instance->li_flags & LI_EXCLUSIVE) == 0)
  924                 panic("downgrade of shared lock (%s) %s @ %s:%d",
  925                     class->lc_name, lock->lo_name, file, line);
  926         if ((instance->li_flags & LI_RECURSEMASK) != 0)
  927                 panic("downgrade of recursed lock (%s) %s r=%d @ %s:%d",
  928                     class->lc_name, lock->lo_name,
  929                     instance->li_flags & LI_RECURSEMASK, file, line);
  930         instance->li_flags &= ~LI_EXCLUSIVE;
  931 }
  932 
  933 void
  934 witness_unlock(struct lock_object *lock, int flags, const char *file, int line)
  935 {
  936         struct lock_list_entry **lock_list, *lle;
  937         struct lock_instance *instance;
  938         struct lock_class *class;
  939         struct thread *td;
  940         register_t s;
  941         int i, j;
  942 
  943         if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL ||
  944             panicstr != NULL)
  945                 return;
  946         td = curthread;
  947         class = lock->lo_class;
  948         file = fixup_filename(file);
  949         if (class->lc_flags & LC_SLEEPLOCK)
  950                 lock_list = &td->td_sleeplocks;
  951         else
  952                 lock_list = PCPU_PTR(spinlocks);
  953         for (; *lock_list != NULL; lock_list = &(*lock_list)->ll_next)
  954                 for (i = 0; i < (*lock_list)->ll_count; i++) {
  955                         instance = &(*lock_list)->ll_children[i];
  956                         if (instance->li_lock == lock) {
  957                                 if ((instance->li_flags & LI_EXCLUSIVE) != 0 &&
  958                                     (flags & LOP_EXCLUSIVE) == 0) {
  959                                         printf(
  960                                         "shared unlock of (%s) %s @ %s:%d\n",
  961                                             class->lc_name, lock->lo_name,
  962                                             file, line);
  963                                         printf(
  964                                         "while exclusively locked from %s:%d\n",
  965                                             instance->li_file,
  966                                             instance->li_line);
  967                                         panic("excl->ushare");
  968                                 }
  969                                 if ((instance->li_flags & LI_EXCLUSIVE) == 0 &&
  970                                     (flags & LOP_EXCLUSIVE) != 0) {
  971                                         printf(
  972                                         "exclusive unlock of (%s) %s @ %s:%d\n",
  973                                             class->lc_name, lock->lo_name,
  974                                             file, line);
  975                                         printf(
  976                                         "while share locked from %s:%d\n",
  977                                             instance->li_file,
  978                                             instance->li_line);
  979                                         panic("share->uexcl");
  980                                 }
  981                                 /* If we are recursed, unrecurse. */
  982                                 if ((instance->li_flags & LI_RECURSEMASK) > 0) {
  983                                         CTR4(KTR_WITNESS,
  984                                     "%s: pid %d unrecursed on %s r=%d", __func__,
  985                                             td->td_proc->p_pid,
  986                                             instance->li_lock->lo_name,
  987                                             instance->li_flags);
  988                                         instance->li_flags--;
  989                                         return;
  990                                 }
  991                                 s = intr_disable();
  992                                 CTR4(KTR_WITNESS,
  993                                     "%s: pid %d removed %s from lle[%d]", __func__,
  994                                     td->td_proc->p_pid,
  995                                     instance->li_lock->lo_name,
  996                                     (*lock_list)->ll_count - 1);
  997                                 for (j = i; j < (*lock_list)->ll_count - 1; j++)
  998                                         (*lock_list)->ll_children[j] =
  999                                             (*lock_list)->ll_children[j + 1];
 1000                                 (*lock_list)->ll_count--;
 1001                                 intr_restore(s);
 1002                                 if ((*lock_list)->ll_count == 0) {
 1003                                         lle = *lock_list;
 1004                                         *lock_list = lle->ll_next;
 1005                                         CTR3(KTR_WITNESS,
 1006                                             "%s: pid %d removed lle %p", __func__,
 1007                                             td->td_proc->p_pid, lle);
 1008                                         witness_lock_list_free(lle);
 1009                                 }
 1010                                 return;
 1011                         }
 1012                 }
 1013         panic("lock (%s) %s not locked @ %s:%d", class->lc_name, lock->lo_name,
 1014             file, line);
 1015 }
 1016 
 1017 /*
 1018  * Warn if any locks other than 'lock' are held.  Flags can be passed in to
 1019  * exempt Giant and sleepable locks from the checks as well.  If any
 1020  * non-exempt locks are held, then a supplied message is printed to the
 1021  * console along with a list of the offending locks.  If indicated in the
 1022  * flags then a failure results in a panic as well.
 1023  */
 1024 int
 1025 witness_warn(int flags, struct lock_object *lock, const char *fmt, ...)
 1026 {
 1027         struct lock_list_entry *lle;
 1028         struct lock_instance *lock1;
 1029         struct thread *td;
 1030         va_list ap;
 1031         int i, n;
 1032 
 1033         if (witness_cold || witness_watch == 0 || panicstr != NULL)
 1034                 return (0);
 1035         n = 0;
 1036         td = curthread;
 1037         for (lle = td->td_sleeplocks; lle != NULL; lle = lle->ll_next)
 1038                 for (i = lle->ll_count - 1; i >= 0; i--) {
 1039                         lock1 = &lle->ll_children[i];
 1040                         if (lock1->li_lock == lock)
 1041                                 continue;
 1042                         if (flags & WARN_GIANTOK &&
 1043                             lock1->li_lock == &Giant.mtx_object)
 1044                                 continue;
 1045                         if (flags & WARN_SLEEPOK &&
 1046                             (lock1->li_lock->lo_flags & LO_SLEEPABLE) != 0)
 1047                                 continue;
 1048                         if (n == 0) {
 1049                                 va_start(ap, fmt);
 1050                                 vprintf(fmt, ap);
 1051                                 va_end(ap);
 1052                                 printf(" with the following");
 1053                                 if (flags & WARN_SLEEPOK)
 1054                                         printf(" non-sleepable");
  1055                         printf(" locks held:\n");
 1056                         }
 1057                         n++;
 1058                         witness_list_lock(lock1);
 1059                 }
 1060         if (PCPU_GET(spinlocks) != NULL) {
 1061                 /*
  1062                  * Since we already hold a spin lock, preemption is
 1063                  * already blocked.
 1064                  */
 1065                 if (n == 0) {
 1066                         va_start(ap, fmt);
 1067                         vprintf(fmt, ap);
 1068                         va_end(ap);
 1069                         printf(" with the following");
 1070                         if (flags & WARN_SLEEPOK)
 1071                                 printf(" non-sleepable");
  1072                         printf(" locks held:\n");
 1073                 }
 1074                 n += witness_list_locks(PCPU_PTR(spinlocks));
 1075         }
 1076         if (flags & WARN_PANIC && n)
 1077                 panic("witness_warn");
 1078 #ifdef DDB
 1079         else if (witness_ddb && n)
 1080                 Debugger(__func__);
 1081 #endif
 1082         return (n);
 1083 }
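       /*
        * Example call (a sketch, not part of the original source): code that
        * is about to sleep might verify that only Giant and sleepable locks
        * are still held, panicking otherwise:
        *
        *      witness_warn(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
        *          "sleeping in example_function()");
        */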
 1084 
 1085 const char *
 1086 witness_file(struct lock_object *lock)
 1087 {
 1088         struct witness *w;
 1089 
 1090         if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
 1091                 return ("?");
 1092         w = lock->lo_witness;
 1093         return (w->w_file);
 1094 }
 1095 
 1096 int
 1097 witness_line(struct lock_object *lock)
 1098 {
 1099         struct witness *w;
 1100 
 1101         if (witness_cold || witness_watch == 0 || lock->lo_witness == NULL)
 1102                 return (0);
 1103         w = lock->lo_witness;
 1104         return (w->w_line);
 1105 }
 1106 
 1107 static struct witness *
 1108 enroll(const char *description, struct lock_class *lock_class)
 1109 {
 1110         struct witness *w;
 1111 
  1112         if (witness_watch == 0 || panicstr != NULL)
 1113                 return (NULL);
 1114         if ((lock_class->lc_flags & LC_SPINLOCK) && witness_skipspin)
 1115                 return (NULL);
 1116         mtx_lock_spin(&w_mtx);
 1117         STAILQ_FOREACH(w, &w_all, w_list) {
 1118                 if (w->w_name == description || (w->w_refcount > 0 &&
 1119                     strcmp(description, w->w_name) == 0)) {
 1120                         w->w_refcount++;
 1121                         mtx_unlock_spin(&w_mtx);
 1122                         if (lock_class != w->w_class)
 1123                                 panic(
 1124                                 "lock (%s) %s does not match earlier (%s) lock",
 1125                                     description, lock_class->lc_name,
 1126                                     w->w_class->lc_name);
 1127                         return (w);
 1128                 }
 1129         }
 1130         /*
  1131          * This isn't quite right, as witness_cold is still non-zero while
  1132          * we enroll all the locks initialized before witness_initialize().
 1133          */
 1134         if ((lock_class->lc_flags & LC_SPINLOCK) && !witness_cold) {
 1135                 mtx_unlock_spin(&w_mtx);
 1136                 panic("spin lock %s not in order list", description);
 1137         }
 1138         if ((w = witness_get()) == NULL)
 1139                 return (NULL);
 1140         w->w_name = description;
 1141         w->w_class = lock_class;
 1142         w->w_refcount = 1;
 1143         STAILQ_INSERT_HEAD(&w_all, w, w_list);
 1144         if (lock_class->lc_flags & LC_SPINLOCK)
 1145                 STAILQ_INSERT_HEAD(&w_spin, w, w_typelist);
 1146         else if (lock_class->lc_flags & LC_SLEEPLOCK)
 1147                 STAILQ_INSERT_HEAD(&w_sleep, w, w_typelist);
 1148         else {
 1149                 mtx_unlock_spin(&w_mtx);
 1150                 panic("lock class %s is not sleep or spin",
 1151                     lock_class->lc_name);
 1152         }
 1153         mtx_unlock_spin(&w_mtx);
 1154         return (w);
 1155 }
 1156 
 1157 /* Don't let the door bang you on the way out... */
 1158 static int
 1159 depart(struct witness *w)
 1160 {
 1161         struct witness_child_list_entry *wcl, *nwcl;
 1162         struct witness_list *list;
 1163         struct witness *parent;
 1164 
 1165         MPASS(w->w_refcount == 0);
 1166         if (w->w_class->lc_flags & LC_SLEEPLOCK)
 1167                 list = &w_sleep;
 1168         else
 1169                 list = &w_spin;
 1170         /*
 1171          * First, we run through the entire tree looking for any
 1172          * witnesses that the outgoing witness is a child of.  For
 1173          * each parent that we find, we reparent all the direct
 1174          * children of the outgoing witness to its parent.
 1175          */
 1176         STAILQ_FOREACH(parent, list, w_typelist) {
 1177                 if (!isitmychild(parent, w))
 1178                         continue;
 1179                 removechild(parent, w);
 1180                 if (!reparentchildren(parent, w))
 1181                         return (0);
 1182         }
 1183 
 1184         /*
 1185          * Now we go through and free up the child list of the
 1186          * outgoing witness.
 1187          */
 1188         for (wcl = w->w_children; wcl != NULL; wcl = nwcl) {
 1189                 nwcl = wcl->wcl_next;
 1190                 witness_child_free(wcl);
 1191         }
 1192 
 1193         /*
 1194          * Detach from various lists and free.
 1195          */
 1196         STAILQ_REMOVE(list, w, witness, w_typelist);
 1197         STAILQ_REMOVE(&w_all, w, witness, w_list);
 1198         witness_free(w);
 1199 
 1200         /* Finally, fixup the tree. */
 1201         return (rebalancetree(list));
 1202 }
 1203 
 1204 /*
 1205  * Prune an entire lock order tree.  We look for cases where a lock
 1206  * is now both a descendant and a direct child of a given lock.  In
 1207  * that case, we want to remove the direct child link from the tree.
 1208  *
 1209  * Returns false if insertchild() fails.
 1210  */
 1211 static int
 1212 rebalancetree(struct witness_list *list)
 1213 {
 1214         struct witness *child, *parent;
 1215 
 1216         STAILQ_FOREACH(child, list, w_typelist) {
 1217                 STAILQ_FOREACH(parent, list, w_typelist) {
 1218                         if (!isitmychild(parent, child))
 1219                                 continue;
 1220                         removechild(parent, child);
 1221                         if (isitmydescendant(parent, child))
 1222                                 continue;
 1223                         if (!insertchild(parent, child))
 1224                                 return (0);
 1225                 }
 1226         }
 1227         witness_levelall();
 1228         return (1);
 1229 }
 1230 
 1231 /*
 1232  * Add "child" as a direct child of "parent".  Returns false if
 1233  * we fail due to out of memory.
 1234  */
 1235 static int
 1236 insertchild(struct witness *parent, struct witness *child)
 1237 {
 1238         struct witness_child_list_entry **wcl;
 1239 
 1240         MPASS(child != NULL && parent != NULL);
 1241 
 1242         /*
  1243          * Insert "child" into the child list of "parent".
 1244          */
 1245         wcl = &parent->w_children;
 1246         while (*wcl != NULL && (*wcl)->wcl_count == WITNESS_NCHILDREN)
 1247                 wcl = &(*wcl)->wcl_next;
 1248         if (*wcl == NULL) {
 1249                 *wcl = witness_child_get();
 1250                 if (*wcl == NULL)
 1251                         return (0);
 1252         }
 1253         (*wcl)->wcl_children[(*wcl)->wcl_count++] = child;
 1254 
 1255         return (1);
 1256 }
 1257 
 1258 /*
  1259  * Make all the direct children of oldparent be direct children of
  1260  * newparent.  Returns false if a child list allocation fails.
 1261  */
 1262 static int
 1263 reparentchildren(struct witness *newparent, struct witness *oldparent)
 1264 {
 1265         struct witness_child_list_entry *wcl;
 1266         int i;
 1267 
 1268         /* Avoid making a witness a child of itself. */
 1269         MPASS(!isitmychild(oldparent, newparent));
 1270         
 1271         for (wcl = oldparent->w_children; wcl != NULL; wcl = wcl->wcl_next)
 1272                 for (i = 0; i < wcl->wcl_count; i++)
 1273                         if (!insertchild(newparent, wcl->wcl_children[i]))
 1274                                 return (0);
 1275         return (1);
 1276 }
 1277 
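       /*
        * Record "child" as a direct child of "parent" in the lock order
        * tree and rebalance the tree for that lock type.  Both witnesses
        * must be of the same type (sleep or spin).  Returns false if a
        * child list allocation fails.
        */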
 1278 static int
 1279 itismychild(struct witness *parent, struct witness *child)
 1280 {
 1281         struct witness_list *list;
 1282 
 1283         MPASS(child != NULL && parent != NULL);
 1284         if ((parent->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)) !=
 1285             (child->w_class->lc_flags & (LC_SLEEPLOCK | LC_SPINLOCK)))
 1286                 panic(
 1287                 "%s: parent (%s) and child (%s) are not the same lock type",
 1288                     __func__, parent->w_class->lc_name,
 1289                     child->w_class->lc_name);
 1290 
 1291         if (!insertchild(parent, child))
 1292                 return (0);
 1293 
 1294         if (parent->w_class->lc_flags & LC_SLEEPLOCK)
 1295                 list = &w_sleep;
 1296         else
 1297                 list = &w_spin;
 1298         return (rebalancetree(list));
 1299 }
 1300 
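       /*
        * Remove "child" from the direct child list of "parent".  The hole
        * left in the child array is filled with the last entry, and the
        * child list entry itself is freed once it becomes empty.
        */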
 1301 static void
 1302 removechild(struct witness *parent, struct witness *child)
 1303 {
 1304         struct witness_child_list_entry **wcl, *wcl1;
 1305         int i;
 1306 
 1307         for (wcl = &parent->w_children; *wcl != NULL; wcl = &(*wcl)->wcl_next)
 1308                 for (i = 0; i < (*wcl)->wcl_count; i++)
 1309                         if ((*wcl)->wcl_children[i] == child)
 1310                                 goto found;
 1311         return;
 1312 found:
 1313         (*wcl)->wcl_count--;
 1314         if ((*wcl)->wcl_count > i)
 1315                 (*wcl)->wcl_children[i] =
 1316                     (*wcl)->wcl_children[(*wcl)->wcl_count];
 1317         MPASS((*wcl)->wcl_children[i] != NULL);
 1318         if ((*wcl)->wcl_count != 0)
 1319                 return;
 1320         wcl1 = *wcl;
 1321         *wcl = wcl1->wcl_next;
 1322         witness_child_free(wcl1);
 1323 }
 1324 
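       /*
        * Returns true if "child" appears in the direct child list of
        * "parent".
        */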
 1325 static int
 1326 isitmychild(struct witness *parent, struct witness *child)
 1327 {
 1328         struct witness_child_list_entry *wcl;
 1329         int i;
 1330 
 1331         for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
 1332                 for (i = 0; i < wcl->wcl_count; i++) {
 1333                         if (wcl->wcl_children[i] == child)
 1334                                 return (1);
 1335                 }
 1336         }
 1337         return (0);
 1338 }
 1339 
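       /*
        * Returns true if "child" is a direct or indirect descendant of
        * "parent".  The MPASS below is a sanity check against runaway
        * child lists while recursing.
        */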
 1340 static int
 1341 isitmydescendant(struct witness *parent, struct witness *child)
 1342 {
 1343         struct witness_child_list_entry *wcl;
 1344         int i, j;
 1345 
 1346         if (isitmychild(parent, child))
 1347                 return (1);
 1348         j = 0;
 1349         for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next) {
 1350                 MPASS(j < 1000);
 1351                 for (i = 0; i < wcl->wcl_count; i++) {
 1352                         if (isitmydescendant(wcl->wcl_children[i], child))
 1353                                 return (1);
 1354                 }
 1355                 j++;
 1356         }
 1357         return (0);
 1358 }
 1359 
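       /*
        * Recompute the level (depth in the lock order tree) of every
        * witness: clear all levels, then level the descendants of each
        * witness that has no parent.
        */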
 1360 static void
  1361 witness_levelall(void)
 1362 {
 1363         struct witness_list *list;
 1364         struct witness *w, *w1;
 1365 
 1366         /*
 1367          * First clear all levels.
 1368          */
 1369         STAILQ_FOREACH(w, &w_all, w_list) {
 1370                 w->w_level = 0;
 1371         }
 1372 
 1373         /*
 1374          * Look for locks with no parent and level all their descendants.
 1375          */
 1376         STAILQ_FOREACH(w, &w_all, w_list) {
 1377                 /*
  1378                  * This is just an optimization; technically we could get
  1379                  * away with just walking the w_all list each time.
 1380                  */
 1381                 if (w->w_class->lc_flags & LC_SLEEPLOCK)
 1382                         list = &w_sleep;
 1383                 else
 1384                         list = &w_spin;
 1385                 STAILQ_FOREACH(w1, list, w_typelist) {
 1386                         if (isitmychild(w1, w))
 1387                                 goto skip;
 1388                 }
 1389                 witness_leveldescendents(w, 0);
 1390         skip:
 1391                 ;       /* silence GCC 3.x */
 1392         }
 1393 }
 1394 
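       /*
        * Raise the level of "parent" to at least "level" and recursively
        * level all of its descendants one deeper.
        */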
 1395 static void
 1396 witness_leveldescendents(struct witness *parent, int level)
 1397 {
 1398         struct witness_child_list_entry *wcl;
 1399         int i;
 1400 
 1401         if (parent->w_level < level)
 1402                 parent->w_level = level;
 1403         level++;
 1404         for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
 1405                 for (i = 0; i < wcl->wcl_count; i++)
 1406                         witness_leveldescendents(wcl->wcl_children[i], level);
 1407 }
 1408 
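       /*
        * Display "parent" and all of its descendants using the supplied
        * printf-like function, indenting by depth in the tree and noting
        * witnesses that are dead or have already been displayed.
        */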
 1409 static void
 1410 witness_displaydescendants(void(*prnt)(const char *fmt, ...),
 1411                            struct witness *parent, int indent)
 1412 {
 1413         struct witness_child_list_entry *wcl;
 1414         int i, level;
 1415 
 1416         level = parent->w_level;
 1417         prnt("%-2d", level);
 1418         for (i = 0; i < indent; i++)
 1419                 prnt(" ");
 1420         if (parent->w_refcount > 0)
 1421                 prnt("%s", parent->w_name);
 1422         else
 1423                 prnt("(dead)");
 1424         if (parent->w_displayed) {
 1425                 prnt(" -- (already displayed)\n");
 1426                 return;
 1427         }
 1428         parent->w_displayed = 1;
 1429         if (parent->w_refcount > 0) {
 1430                 if (parent->w_file != NULL)
 1431                         prnt(" -- last acquired @ %s:%d", parent->w_file,
 1432                             parent->w_line);
 1433         }
 1434         prnt("\n");
 1435         for (wcl = parent->w_children; wcl != NULL; wcl = wcl->wcl_next)
 1436                 for (i = 0; i < wcl->wcl_count; i++)
  1437                         witness_displaydescendants(prnt,
  1438                             wcl->wcl_children[i], indent + 1);
 1439 }
 1440 
 1441 #ifdef BLESSING
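       /*
        * Returns true if the pair (w1, w2) appears in blessed_list in
        * either order, i.e., this particular lock order has been
        * explicitly exempted from the usual checks.
        */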
 1442 static int
 1443 blessed(struct witness *w1, struct witness *w2)
 1444 {
 1445         int i;
 1446         struct witness_blessed *b;
 1447 
 1448         for (i = 0; i < blessed_count; i++) {
 1449                 b = &blessed_list[i];
 1450                 if (strcmp(w1->w_name, b->b_lock1) == 0) {
 1451                         if (strcmp(w2->w_name, b->b_lock2) == 0)
 1452                                 return (1);
 1453                         continue;
 1454                 }
 1455                 if (strcmp(w1->w_name, b->b_lock2) == 0)
 1456                         if (strcmp(w2->w_name, b->b_lock1) == 0)
 1457                                 return (1);
 1458         }
 1459         return (0);
 1460 }
 1461 #endif
 1462 
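       /*
        * Allocate a witness from the static w_free pool.  Expects w_mtx
        * to be held; on failure the mutex is released, witness_watch is
        * turned off if the pool is exhausted, and NULL is returned.
        */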
 1463 static struct witness *
 1464 witness_get(void)
 1465 {
 1466         struct witness *w;
 1467 
 1468         if (witness_watch == 0) {
 1469                 mtx_unlock_spin(&w_mtx);
 1470                 return (NULL);
 1471         }
 1472         if (STAILQ_EMPTY(&w_free)) {
 1473                 witness_watch = 0;
 1474                 mtx_unlock_spin(&w_mtx);
 1475                 printf("%s: witness exhausted\n", __func__);
 1476                 return (NULL);
 1477         }
 1478         w = STAILQ_FIRST(&w_free);
 1479         STAILQ_REMOVE_HEAD(&w_free, w_list);
 1480         bzero(w, sizeof(*w));
 1481         return (w);
 1482 }
 1483 
 1484 static void
 1485 witness_free(struct witness *w)
 1486 {
 1487 
 1488         STAILQ_INSERT_HEAD(&w_free, w, w_list);
 1489 }
 1490 
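       /*
        * Allocate a child list entry from the static w_child_free pool.
        * Like witness_get(), this expects w_mtx to be held and drops it
        * on failure.
        */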
 1491 static struct witness_child_list_entry *
 1492 witness_child_get(void)
 1493 {
 1494         struct witness_child_list_entry *wcl;
 1495 
 1496         if (witness_watch == 0) {
 1497                 mtx_unlock_spin(&w_mtx);
 1498                 return (NULL);
 1499         }
 1500         wcl = w_child_free;
 1501         if (wcl == NULL) {
 1502                 witness_watch = 0;
 1503                 mtx_unlock_spin(&w_mtx);
 1504                 printf("%s: witness exhausted\n", __func__);
 1505                 return (NULL);
 1506         }
 1507         w_child_free = wcl->wcl_next;
 1508         bzero(wcl, sizeof(*wcl));
 1509         return (wcl);
 1510 }
 1511 
 1512 static void
 1513 witness_child_free(struct witness_child_list_entry *wcl)
 1514 {
 1515 
 1516         wcl->wcl_next = w_child_free;
 1517         w_child_free = wcl;
 1518 }
 1519 
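       /*
        * Allocate a lock list entry from the static w_lock_list_free
        * pool.  Unlike the allocators above, this acquires and releases
        * w_mtx itself.
        */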
 1520 static struct lock_list_entry *
 1521 witness_lock_list_get(void)
 1522 {
 1523         struct lock_list_entry *lle;
 1524 
 1525         if (witness_watch == 0)
 1526                 return (NULL);
 1527         mtx_lock_spin(&w_mtx);
 1528         lle = w_lock_list_free;
 1529         if (lle == NULL) {
 1530                 witness_watch = 0;
 1531                 mtx_unlock_spin(&w_mtx);
 1532                 printf("%s: witness exhausted\n", __func__);
 1533                 return (NULL);
 1534         }
 1535         w_lock_list_free = lle->ll_next;
 1536         mtx_unlock_spin(&w_mtx);
 1537         bzero(lle, sizeof(*lle));
 1538         return (lle);
 1539 }
 1540                 
 1541 static void
 1542 witness_lock_list_free(struct lock_list_entry *lle)
 1543 {
 1544 
 1545         mtx_lock_spin(&w_mtx);
 1546         lle->ll_next = w_lock_list_free;
 1547         w_lock_list_free = lle;
 1548         mtx_unlock_spin(&w_mtx);
 1549 }
 1550 
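       /*
        * Find the lock instance for "lock" in the given per-thread or
        * per-CPU lock list, or return NULL if the lock is not held.
        */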
 1551 static struct lock_instance *
 1552 find_instance(struct lock_list_entry *lock_list, struct lock_object *lock)
 1553 {
 1554         struct lock_list_entry *lle;
 1555         struct lock_instance *instance;
 1556         int i;
 1557 
 1558         for (lle = lock_list; lle != NULL; lle = lle->ll_next)
 1559                 for (i = lle->ll_count - 1; i >= 0; i--) {
 1560                         instance = &lle->ll_children[i];
 1561                         if (instance->li_lock == lock)
 1562                                 return (instance);
 1563                 }
 1564         return (NULL);
 1565 }
 1566 
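       /*
        * Print a one-line description of a held lock instance: shared or
        * exclusive, class, name, type, recursion count, and the file and
        * line where it was acquired.
        */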
 1567 static void
 1568 witness_list_lock(struct lock_instance *instance)
 1569 {
 1570         struct lock_object *lock;
 1571 
 1572         lock = instance->li_lock;
 1573         printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
 1574             "exclusive" : "shared", lock->lo_class->lc_name, lock->lo_name);
 1575         if (lock->lo_type != lock->lo_name)
 1576                 printf(" (%s)", lock->lo_type);
 1577         printf(" r = %d (%p) locked @ %s:%d\n",
 1578             instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
 1579             instance->li_line);
 1580 }
 1581 
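       /*
        * Print every lock instance on the given lock list and return the
        * number of locks held.
        */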
 1582 int
 1583 witness_list_locks(struct lock_list_entry **lock_list)
 1584 {
 1585         struct lock_list_entry *lle;
 1586         int i, nheld;
 1587 
 1588         nheld = 0;
 1589         for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
 1590                 for (i = lle->ll_count - 1; i >= 0; i--) {
 1591                         witness_list_lock(&lle->ll_children[i]);
 1592                         nheld++;
 1593                 }
 1594         return (nheld);
 1595 }
 1596 
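       /*
        * Save the file and line at which a held sleep lock was acquired
        * into *filep and *linep so that witness_restore() can reinstate
        * them later.
        */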
 1597 void
 1598 witness_save(struct lock_object *lock, const char **filep, int *linep)
 1599 {
 1600         struct lock_instance *instance;
 1601 
 1602         KASSERT(!witness_cold, ("%s: witness_cold", __func__));
 1603         if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
 1604                 return;
 1605         if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
 1606                 panic("%s: lock (%s) %s is not a sleep lock", __func__,
 1607                     lock->lo_class->lc_name, lock->lo_name);
 1608         instance = find_instance(curthread->td_sleeplocks, lock);
 1609         if (instance == NULL)
 1610                 panic("%s: lock (%s) %s not locked", __func__,
 1611                     lock->lo_class->lc_name, lock->lo_name);
 1612         *filep = instance->li_file;
 1613         *linep = instance->li_line;
 1614 }
 1615 
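       /*
        * Restore a file and line previously saved by witness_save() to
        * both the lock instance and its witness.
        */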
 1616 void
 1617 witness_restore(struct lock_object *lock, const char *file, int line)
 1618 {
 1619         struct lock_instance *instance;
 1620 
 1621         KASSERT(!witness_cold, ("%s: witness_cold", __func__));
 1622         if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
 1623                 return;
 1624         if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) == 0)
 1625                 panic("%s: lock (%s) %s is not a sleep lock", __func__,
 1626                     lock->lo_class->lc_name, lock->lo_name);
 1627         instance = find_instance(curthread->td_sleeplocks, lock);
 1628         if (instance == NULL)
 1629                 panic("%s: lock (%s) %s not locked", __func__,
 1630                     lock->lo_class->lc_name, lock->lo_name);
 1631         lock->lo_witness->w_file = file;
 1632         lock->lo_witness->w_line = line;
 1633         instance->li_file = file;
 1634         instance->li_line = line;
 1635 }
 1636 
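       /*
        * Assert that the current thread holds (or does not hold) "lock"
        * in the manner described by "flags" (LA_LOCKED, LA_SLOCKED,
        * LA_XLOCKED, or LA_UNLOCKED, optionally combined with LA_RECURSED
        * or LA_NOTRECURSED), panicking if the assertion fails.
        */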
 1637 void
 1638 witness_assert(struct lock_object *lock, int flags, const char *file, int line)
 1639 {
 1640 #ifdef INVARIANT_SUPPORT
 1641         struct lock_instance *instance;
 1642 
 1643         if (lock->lo_witness == NULL || witness_watch == 0 || panicstr != NULL)
 1644                 return;
 1645         if ((lock->lo_class->lc_flags & LC_SLEEPLOCK) != 0)
 1646                 instance = find_instance(curthread->td_sleeplocks, lock);
 1647         else if ((lock->lo_class->lc_flags & LC_SPINLOCK) != 0)
 1648                 instance = find_instance(PCPU_GET(spinlocks), lock);
 1649         else {
 1650                 panic("Lock (%s) %s is not sleep or spin!",
 1651                     lock->lo_class->lc_name, lock->lo_name);
 1652                 return;
 1653         }
 1654         file = fixup_filename(file);
 1655         switch (flags) {
 1656         case LA_UNLOCKED:
 1657                 if (instance != NULL)
 1658                         panic("Lock (%s) %s locked @ %s:%d.",
 1659                             lock->lo_class->lc_name, lock->lo_name, file, line);
 1660                 break;
 1661         case LA_LOCKED:
 1662         case LA_LOCKED | LA_RECURSED:
 1663         case LA_LOCKED | LA_NOTRECURSED:
 1664         case LA_SLOCKED:
 1665         case LA_SLOCKED | LA_RECURSED:
 1666         case LA_SLOCKED | LA_NOTRECURSED:
 1667         case LA_XLOCKED:
 1668         case LA_XLOCKED | LA_RECURSED:
 1669         case LA_XLOCKED | LA_NOTRECURSED:
 1670                 if (instance == NULL) {
 1671                         panic("Lock (%s) %s not locked @ %s:%d.",
 1672                             lock->lo_class->lc_name, lock->lo_name, file, line);
 1673                         break;
 1674                 }
 1675                 if ((flags & LA_XLOCKED) != 0 &&
 1676                     (instance->li_flags & LI_EXCLUSIVE) == 0)
 1677                         panic("Lock (%s) %s not exclusively locked @ %s:%d.",
 1678                             lock->lo_class->lc_name, lock->lo_name, file, line);
 1679                 if ((flags & LA_SLOCKED) != 0 &&
 1680                     (instance->li_flags & LI_EXCLUSIVE) != 0)
 1681                         panic("Lock (%s) %s exclusively locked @ %s:%d.",
 1682                             lock->lo_class->lc_name, lock->lo_name, file, line);
 1683                 if ((flags & LA_RECURSED) != 0 &&
 1684                     (instance->li_flags & LI_RECURSEMASK) == 0)
 1685                         panic("Lock (%s) %s not recursed @ %s:%d.",
 1686                             lock->lo_class->lc_name, lock->lo_name, file, line);
 1687                 if ((flags & LA_NOTRECURSED) != 0 &&
 1688                     (instance->li_flags & LI_RECURSEMASK) != 0)
 1689                         panic("Lock (%s) %s recursed @ %s:%d.",
 1690                             lock->lo_class->lc_name, lock->lo_name, file, line);
 1691                 break;
 1692         default:
 1693                 panic("Invalid lock assertion at %s:%d.", file, line);
 1694 
 1695         }
 1696 #endif  /* INVARIANT_SUPPORT */
 1697 }
 1698 
 1699 #ifdef DDB
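       /*
        * List all locks held by the given thread; used by the "show
        * locks" DDB command below.
        */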
 1700 static void
 1701 witness_list(struct thread *td)
 1702 {
 1703 
 1704         KASSERT(!witness_cold, ("%s: witness_cold", __func__));
 1705         KASSERT(db_active, ("%s: not in the debugger", __func__));
 1706 
 1707         if (witness_watch == 0)
 1708                 return;
 1709 
 1710         witness_list_locks(&td->td_sleeplocks);
 1711 
 1712         /*
  1713          * We only handle spin locks if td == curthread.  This is somewhat
  1714          * broken if td is currently executing on some other CPU and holds
  1715          * spin locks, as we won't display those locks.  If we had an MI
  1716          * way of getting the per-CPU data for a given CPU, then we could
  1717          * use td->td_oncpu to get that CPU's list of spin locks held by
  1718          * this thread and "fix" this.
  1719          *
  1720          * That still wouldn't really fix this unless we locked sched_lock
  1721          * or stopped the other CPU to make sure it wasn't changing the
  1722          * list out from under us.  It is probably best not to try to
  1723          * handle threads on other CPUs for now.
 1724          */
 1725         if (td == curthread && PCPU_GET(spinlocks) != NULL)
 1726                 witness_list_locks(PCPU_PTR(spinlocks));
 1727 }
 1728 
 1729 DB_SHOW_COMMAND(locks, db_witness_list)
 1730 {
 1731         struct thread *td;
 1732         pid_t pid;
 1733         struct proc *p;
 1734 
 1735         if (have_addr) {
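                       /*
                        * DDB evaluates the argument in its default radix
                        * (hex), but PIDs are conventionally typed in
                        * decimal, so reinterpret each hex nibble of addr
                        * as one decimal digit.
                        */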
 1736                 pid = (addr % 16) + ((addr >> 4) % 16) * 10 +
 1737                     ((addr >> 8) % 16) * 100 + ((addr >> 12) % 16) * 1000 +
 1738                     ((addr >> 16) % 16) * 10000;
 1739                 /* sx_slock(&allproc_lock); */
 1740                 FOREACH_PROC_IN_SYSTEM(p) {
 1741                         if (p->p_pid == pid)
 1742                                 break;
 1743                 }
 1744                 /* sx_sunlock(&allproc_lock); */
 1745                 if (p == NULL) {
 1746                         db_printf("pid %d not found\n", pid);
 1747                         return;
 1748                 }
 1749                 FOREACH_THREAD_IN_PROC(p, td) {
 1750                         witness_list(td);
 1751                 }
 1752         } else {
 1753                 td = curthread;
 1754                 witness_list(td);
 1755         }
 1756 }
 1757 
 1758 DB_SHOW_COMMAND(witness, db_witness_display)
 1759 {
 1760 
 1761         witness_display(db_printf);
 1762 }
 1763 #endif
