FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_lockdebug.c

/*      $NetBSD: subr_lockdebug.c,v 1.83 2022/09/02 06:01:38 nakayama Exp $     */

/*-
 * Copyright (c) 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */
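
/*
 * Overview: each initialized lock is shadowed by a lockdebug_t record,
 * kept in a global red-black tree keyed by the lock's address.  Records
 * for held spin locks are chained off the owning CPU and records for
 * held sleep locks off the owning LWP, so the set of locks held at any
 * point can be walked cheaply (see lockdebug_barrier() and the DDB
 * support at the end of this file).
 */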

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.83 2022/09/02 06:01:38 nakayama Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>
#include <sys/rbtree.h>
#include <sys/ksyms.h>
#include <sys/kcov.h>

#include <machine/lock.h>

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#endif

unsigned int            ld_panic;

#ifdef LOCKDEBUG

#ifdef __ia64__
#define LD_BATCH_SHIFT  16
#else
#define LD_BATCH_SHIFT  9
#endif
#define LD_BATCH        (1 << LD_BATCH_SHIFT)
#define LD_BATCH_MASK   (LD_BATCH - 1)
#define LD_MAX_LOCKS    1048576
#define LD_SLOP         16

#define LD_LOCKED       0x01
#define LD_SLEEPER      0x02

#define LD_WRITE_LOCK   0x80000000

typedef struct lockdebug {
        struct rb_node  ld_rb_node;
        __cpu_simple_lock_t ld_spinlock;
        _TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
        _TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
        volatile void   *ld_lock;
        lockops_t       *ld_lockops;
        struct lwp      *ld_lwp;
        uintptr_t       ld_locked;
        uintptr_t       ld_unlocked;
        uintptr_t       ld_initaddr;
        uint16_t        ld_shares;
        uint16_t        ld_cpu;
        uint8_t         ld_flags;
        uint8_t         ld_shwant;      /* advisory */
        uint8_t         ld_exwant;      /* advisory */
        uint8_t         ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

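/*
 * Debug records are carved out in batches of LD_BATCH.  The first
 * batch, ld_prime, is allocated statically so that the earliest locks
 * can be tracked before kmem_alloc() is usable; later batches come
 * from lockdebug_more().  ld_freeptr counts the batches handed out,
 * and lockdebug_alloc() tries to keep at least LD_SLOP records free
 * so that an allocation triggered from within kmem_alloc() itself can
 * still be satisfied.
 */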
__cpu_simple_lock_t     ld_mod_lk;
lockdebuglist_t         ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
#ifdef _KERNEL
lockdebuglist_t         ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
#else
extern lockdebuglist_t  ld_all;
#define cpu_name(a)     "?"
#define cpu_index(a)    -1
#define curlwp          NULL
#endif /* _KERNEL */
int                     ld_nfree;
int                     ld_freeptr;
int                     ld_recurse;
bool                    ld_nomore;
lockdebug_t             ld_prime[LD_BATCH];

#ifdef _KERNEL
static void     lockdebug_abort1(const char *, size_t, lockdebug_t *, int,
    const char *, bool);
static int      lockdebug_more(int);
static void     lockdebug_init(void);
static void     lockdebug_dump(lwp_t *, lockdebug_t *,
    void (*)(const char *, ...)
    __printflike(1, 2));

static signed int
ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
{
        const lockdebug_t *ld1 = n1;
        const lockdebug_t *ld2 = n2;
        const uintptr_t a = (uintptr_t)ld1->ld_lock;
        const uintptr_t b = (uintptr_t)ld2->ld_lock;

        if (a < b)
                return -1;
        if (a > b)
                return 1;
        return 0;
}

static signed int
ld_rbto_compare_key(void *ctx, const void *n, const void *key)
{
        const lockdebug_t *ld = n;
        const uintptr_t a = (uintptr_t)ld->ld_lock;
        const uintptr_t b = (uintptr_t)key;

        if (a < b)
                return -1;
        if (a > b)
                return 1;
        return 0;
}

static rb_tree_t ld_rb_tree;

static const rb_tree_ops_t ld_rb_tree_ops = {
        .rbto_compare_nodes = ld_rbto_compare_nodes,
        .rbto_compare_key = ld_rbto_compare_key,
        .rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
        .rbto_context = NULL
};

static inline lockdebug_t *
lockdebug_lookup1(const volatile void *lock)
{
        lockdebug_t *ld;
        struct cpu_info *ci;

        ci = curcpu();
        __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
        ld = rb_tree_find_node(&ld_rb_tree, (void *)(intptr_t)lock);
        __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
        if (ld == NULL) {
                return NULL;
        }
        __cpu_simple_lock(&ld->ld_spinlock);

        return ld;
}

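/*
 * lockdebug_lock_cpus:
 *
 *      Take every CPU's cpu_ld_lock.  Writers to the red-black tree
 *      must hold all of them, because lockdebug_lookup1() reads the
 *      tree holding only the current CPU's lock.
 */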
static void
lockdebug_lock_cpus(void)
{
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;

        for (CPU_INFO_FOREACH(cii, ci)) {
                __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
        }
}

static void
lockdebug_unlock_cpus(void)
{
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;

        for (CPU_INFO_FOREACH(cii, ci)) {
                __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
        }
}

/*
 * lockdebug_lookup:
 *
 *      Find a lockdebug structure by a pointer to a lock and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(const char *func, size_t line, const volatile void *lock,
    uintptr_t where)
{
        lockdebug_t *ld;

        kcov_silence_enter();
        ld = lockdebug_lookup1(lock);
        kcov_silence_leave();

        if (__predict_false(ld == NULL)) {
                panic("%s,%zu: uninitialized lock (lock=%p, from=%08"
                    PRIxPTR ")", func, line, lock, where);
        }
        return ld;
}

/*
 * lockdebug_init:
 *
 *      Initialize the lockdebug system.  Allocate an initial pool of
 *      lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
        lockdebug_t *ld;
        int i;

        TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
        TAILQ_INIT(&curlwp->l_ld_locks);
        __cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
        __cpu_simple_lock_init(&ld_mod_lk);

        rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

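        /*
         * Prime the free list from the static batch.  Note that slot 0
         * of ld_prime is deliberately left off the lists: the loop
         * starts at 1, and ld_nfree ends up as LD_BATCH - 1.
         */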
        ld = ld_prime;
        for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
                __cpu_simple_lock_init(&ld->ld_spinlock);
                TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
                TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
        }
        ld_freeptr = 1;
        ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *      A lock is being initialized, so allocate an associated debug
 *      structure.
 */
bool
lockdebug_alloc(const char *func, size_t line, volatile void *lock,
    lockops_t *lo, uintptr_t initaddr)
{
        struct cpu_info *ci;
        lockdebug_t *ld;
        int s;

        if (__predict_false(lo == NULL || panicstr != NULL || ld_panic))
                return false;
        if (__predict_false(ld_freeptr == 0))
                lockdebug_init();

        s = splhigh();
        __cpu_simple_lock(&ld_mod_lk);
        if (__predict_false((ld = lockdebug_lookup1(lock)) != NULL)) {
                __cpu_simple_unlock(&ld_mod_lk);
                lockdebug_abort1(func, line, ld, s, "already initialized",
                    true);
                return false;
        }

        /*
         * Pinch a new debug structure.  We may recurse because we call
         * kmem_alloc(), which may need to initialize new locks somewhere
         * down the path.  If not recursing, we try to maintain at least
         * LD_SLOP structures free, which should hopefully be enough to
         * satisfy kmem_alloc().  If we can't provide a structure, not to
         * worry: we'll just mark the lock as not having an ID.
         */
        ci = curcpu();
        ci->ci_lkdebug_recurse++;
        if (TAILQ_EMPTY(&ld_free)) {
                if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
                        ci->ci_lkdebug_recurse--;
                        __cpu_simple_unlock(&ld_mod_lk);
                        splx(s);
                        return false;
                }
                s = lockdebug_more(s);
        } else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
                s = lockdebug_more(s);
        }
        if (__predict_false((ld = TAILQ_FIRST(&ld_free)) == NULL)) {
                __cpu_simple_unlock(&ld_mod_lk);
                splx(s);
                return false;
        }
        TAILQ_REMOVE(&ld_free, ld, ld_chain);
        ld_nfree--;
        ci->ci_lkdebug_recurse--;

        if (__predict_false(ld->ld_lock != NULL)) {
                panic("%s,%zu: corrupt table ld %p", func, line, ld);
        }

        /* Initialise the structure. */
        ld->ld_lock = lock;
        ld->ld_lockops = lo;
        ld->ld_locked = 0;
        ld->ld_unlocked = 0;
        ld->ld_lwp = NULL;
        ld->ld_initaddr = initaddr;
        ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
        lockdebug_lock_cpus();
        (void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
        lockdebug_unlock_cpus();
        __cpu_simple_unlock(&ld_mod_lk);

        splx(s);
        return true;
}
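
/*
 * Usage sketch (illustrative, not part of this file): a lock primitive
 * drives these hooks around its real acquire and release paths,
 * normally via the LOCKDEBUG_* wrapper macros in <sys/lockdebug.h>.
 * "foo" below is hypothetical; see kern_mutex.c or kern_rwlock.c for
 * real callers.
 *
 *      foo_init():    lockdebug_alloc(..., lock, &foo_lockops, initaddr);
 *      foo_enter():   lockdebug_wantlock(..., lock, where, 0);
 *                     <acquire the underlying lock>
 *                     lockdebug_locked(..., lock, NULL, where, 0);
 *      foo_exit():    lockdebug_unlocked(..., lock, where, 0);
 *                     <release the underlying lock>
 *      foo_destroy(): lockdebug_free(..., lock);
 */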

/*
 * lockdebug_free:
 *
 *      A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(const char *func, size_t line, volatile void *lock)
{
        lockdebug_t *ld;
        int s;

        if (__predict_false(panicstr != NULL || ld_panic))
                return;

        s = splhigh();
        __cpu_simple_lock(&ld_mod_lk);
        ld = lockdebug_lookup(func, line, lock,
            (uintptr_t) __builtin_return_address(0));
        if (__predict_false(ld == NULL)) {
                /* Unreachable: lockdebug_lookup() panics on NULL. */
                __cpu_simple_unlock(&ld_mod_lk);
                panic("%s,%zu: destroying uninitialized object %p",
                    func, line, lock);
                return;
        }
        if (__predict_false((ld->ld_flags & LD_LOCKED) != 0 ||
            ld->ld_shares != 0)) {
                __cpu_simple_unlock(&ld_mod_lk);
                lockdebug_abort1(func, line, ld, s, "is locked or in use",
                    true);
                return;
        }
        lockdebug_lock_cpus();
        rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
        lockdebug_unlock_cpus();
        ld->ld_lock = NULL;
        TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
        ld_nfree++;
        __cpu_simple_unlock(&ld->ld_spinlock);
        __cpu_simple_unlock(&ld_mod_lk);
        splx(s);
}

/*
 * lockdebug_more:
 *
 *      Allocate a batch of debug structures and add to the free list.
 *      Must be called with ld_mod_lk held.
 */
static int
lockdebug_more(int s)
{
        lockdebug_t *ld;
        void *block;
        int i, base, m;

        /*
         * Can't call kmem_alloc() if in interrupt context.  XXX We could
         * deadlock, because we don't know which locks the caller holds.
         */
        if (cpu_intr_p() || cpu_softintr_p()) {
                return s;
        }

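        /*
         * kmem_zalloc(KM_SLEEP) may sleep and may itself take locks,
         * so drop ld_mod_lk and the raised SPL around the allocation,
         * then recheck: another CPU may have replenished the free list
         * in the meantime.
         */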
        while (ld_nfree < LD_SLOP) {
                __cpu_simple_unlock(&ld_mod_lk);
                splx(s);
                block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
                s = splhigh();
                __cpu_simple_lock(&ld_mod_lk);

                if (ld_nfree > LD_SLOP) {
                        /* Somebody beat us to it. */
                        __cpu_simple_unlock(&ld_mod_lk);
                        splx(s);
                        kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
                        s = splhigh();
                        __cpu_simple_lock(&ld_mod_lk);
                        continue;
                }

                base = ld_freeptr;
                ld_nfree += LD_BATCH;
                ld = block;
                base <<= LD_BATCH_SHIFT;
                m = uimin(LD_MAX_LOCKS, base + LD_BATCH);

                if (m == LD_MAX_LOCKS)
                        ld_nomore = true;

                for (i = base; i < m; i++, ld++) {
                        __cpu_simple_lock_init(&ld->ld_spinlock);
                        TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
                        TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
                }

                membar_producer();
        }

        return s;
}

/*
 * lockdebug_wantlock:
 *
 *      Process the preamble to a lock acquire.  The "shared"
 *      parameter controls which ld_{ex,sh}want counter is
 *      updated; a negative value of shared updates neither.
 */
void
lockdebug_wantlock(const char *func, size_t line,
    const volatile void *lock, uintptr_t where, int shared)
{
        struct lwp *l = curlwp;
        lockdebug_t *ld;
        bool recurse;
        int s;

        (void)shared;
        recurse = false;

        if (__predict_false(panicstr != NULL || ld_panic))
                return;

        s = splhigh();
        if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
                splx(s);
                return;
        }
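        /*
         * If the lock is already held, decide whether this acquisition
         * would recurse: sleep locks recurse on the owning LWP, spin
         * locks on the owning CPU.
         */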
        if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
                if ((ld->ld_flags & LD_SLEEPER) != 0) {
                        if (ld->ld_lwp == l)
                                recurse = true;
                } else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
                        recurse = true;
        }
        if (cpu_intr_p()) {
                if (__predict_false((ld->ld_flags & LD_SLEEPER) != 0)) {
                        lockdebug_abort1(func, line, ld, s,
                            "acquiring sleep lock from interrupt context",
                            true);
                        return;
                }
        }
        if (shared > 0)
                ld->ld_shwant++;
        else if (shared == 0)
                ld->ld_exwant++;
        if (__predict_false(recurse)) {
                lockdebug_abort1(func, line, ld, s, "locking against myself",
                    true);
                return;
        }
        if (l->l_ld_wanted == NULL) {
                l->l_ld_wanted = ld;
        }
        __cpu_simple_unlock(&ld->ld_spinlock);
        splx(s);
}

/*
 * lockdebug_locked:
 *
 *      Process a lock acquire operation.
 */
void
lockdebug_locked(const char *func, size_t line,
    volatile void *lock, void *cvlock, uintptr_t where, int shared)
{
        struct lwp *l = curlwp;
        lockdebug_t *ld;
        int s;

        if (__predict_false(panicstr != NULL || ld_panic))
                return;

        s = splhigh();
        if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
                splx(s);
                return;
        }
        if (shared) {
                l->l_shlocks++;
                ld->ld_locked = where;
                ld->ld_shares++;
                ld->ld_shwant--;
        } else {
                if (__predict_false((ld->ld_flags & LD_LOCKED) != 0)) {
                        lockdebug_abort1(func, line, ld, s, "already locked",
                            true);
                        return;
                }
                ld->ld_flags |= LD_LOCKED;
                ld->ld_locked = where;
                ld->ld_exwant--;
                if ((ld->ld_flags & LD_SLEEPER) != 0) {
                        TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
                } else {
                        TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
                            ld, ld_chain);
                }
        }
        ld->ld_cpu = (uint16_t)cpu_index(curcpu());
        ld->ld_lwp = l;
        __cpu_simple_unlock(&ld->ld_spinlock);
        if (l->l_ld_wanted == ld) {
                l->l_ld_wanted = NULL;
        }
        splx(s);
}

/*
 * lockdebug_unlocked:
 *
 *      Process a lock release operation.
 */
void
lockdebug_unlocked(const char *func, size_t line,
    volatile void *lock, uintptr_t where, int shared)
{
        struct lwp *l = curlwp;
        lockdebug_t *ld;
        int s;

        if (__predict_false(panicstr != NULL || ld_panic))
                return;

        s = splhigh();
        if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
                splx(s);
                return;
        }
        if (shared) {
                if (__predict_false(l->l_shlocks == 0)) {
                        lockdebug_abort1(func, line, ld, s,
                            "no shared locks held by LWP", true);
                        return;
                }
                if (__predict_false(ld->ld_shares == 0)) {
                        lockdebug_abort1(func, line, ld, s,
                            "no shared holds on this lock", true);
                        return;
                }
                l->l_shlocks--;
                ld->ld_shares--;
                if (ld->ld_lwp == l) {
                        ld->ld_unlocked = where;
                        ld->ld_lwp = NULL;
                }
                if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
                        ld->ld_cpu = (uint16_t)-1;
        } else {
                if (__predict_false((ld->ld_flags & LD_LOCKED) == 0)) {
                        lockdebug_abort1(func, line, ld, s, "not locked", true);
                        return;
                }

                if ((ld->ld_flags & LD_SLEEPER) != 0) {
                        if (__predict_false(ld->ld_lwp != curlwp)) {
                                lockdebug_abort1(func, line, ld, s,
                                    "not held by current LWP", true);
                                return;
                        }
                        TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
                } else {
                        uint16_t idx = (uint16_t)cpu_index(curcpu());
                        if (__predict_false(ld->ld_cpu != idx)) {
                                lockdebug_abort1(func, line, ld, s,
                                    "not held by current CPU", true);
                                return;
                        }
                        TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
                            ld_chain);
                }
                ld->ld_flags &= ~LD_LOCKED;
                ld->ld_unlocked = where;
                ld->ld_lwp = NULL;
        }
        __cpu_simple_unlock(&ld->ld_spinlock);
        splx(s);
}

/*
 * lockdebug_barrier:
 *
 *      Panic if we hold any lock other than "onelock"; if "slplocks"
 *      is set, sleep locks (and shared holds) are exempt from the
 *      check.
 */
void
lockdebug_barrier(const char *func, size_t line, volatile void *onelock,
    int slplocks)
{
        struct lwp *l = curlwp;
        lockdebug_t *ld;
        int s;

        if (__predict_false(panicstr != NULL || ld_panic))
                return;

        s = splhigh();
        if ((l->l_pflag & LP_INTR) == 0) {
                TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
                        if (ld->ld_lock == onelock) {
                                continue;
                        }
                        __cpu_simple_lock(&ld->ld_spinlock);
                        lockdebug_abort1(func, line, ld, s,
                            "spin lock held", true);
                        return;
                }
        }
        if (slplocks) {
                splx(s);
                return;
        }
        ld = TAILQ_FIRST(&l->l_ld_locks);
        if (__predict_false(ld != NULL && ld->ld_lock != onelock)) {
                __cpu_simple_lock(&ld->ld_spinlock);
                lockdebug_abort1(func, line, ld, s, "sleep lock held", true);
                return;
        }
        splx(s);
        if (l->l_shlocks != 0) {
                TAILQ_FOREACH(ld, &ld_all, ld_achain) {
                        if (ld->ld_lock == onelock) {
                                continue;
                        }
                        if (ld->ld_lwp == l)
                                lockdebug_dump(l, ld, printf);
                }
                panic("%s,%zu: holding %d shared locks", func, line,
                    l->l_shlocks);
        }
}
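
/*
 * For example (illustrative): code that is about to go to sleep might
 * assert that it holds no locks at all with
 *
 *      lockdebug_barrier(__func__, __LINE__, NULL, 0);
 *
 * while passing a lock as "onelock" exempts just that lock from the
 * check.  In-tree callers normally go through the LOCKDEBUG_BARRIER()
 * macro rather than calling this function directly.
 */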

/*
 * lockdebug_mem_check:
 *
 *      Check for in-use locks within a memory region that is
 *      being freed.
 */
void
lockdebug_mem_check(const char *func, size_t line, void *base, size_t sz)
{
        lockdebug_t *ld;
        struct cpu_info *ci;
        int s;

        if (__predict_false(panicstr != NULL || ld_panic))
                return;

        kcov_silence_enter();

        s = splhigh();
        ci = curcpu();
        __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
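        /*
         * Find the tracked lock with the lowest address >= base; if it
         * lies inside [base, base + sz) the region being freed still
         * contains an initialized lock.
         */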
        ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
        if (ld != NULL) {
                const uintptr_t lock = (uintptr_t)ld->ld_lock;

                if (__predict_false((uintptr_t)base > lock))
                        panic("%s,%zu: corrupt tree ld=%p, base=%p, sz=%zu",
                            func, line, ld, base, sz);
                if (lock >= (uintptr_t)base + sz)
                        ld = NULL;
        }
        __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
        if (__predict_false(ld != NULL)) {
                __cpu_simple_lock(&ld->ld_spinlock);
                lockdebug_abort1(func, line, ld, s,
                    "allocation contains active lock", !cold);
                kcov_silence_leave();
                return;
        }
        splx(s);

        kcov_silence_leave();
}
#endif /* _KERNEL */

/*
 * lockdebug_dump:
 *
 *      Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lwp_t *l, lockdebug_t *ld, void (*pr)(const char *, ...)
    __printflike(1, 2))
{
        int sleeper = (ld->ld_flags & LD_SLEEPER);
        lockops_t *lo = ld->ld_lockops;
        char locksym[128], initsym[128], lockedsym[128], unlockedsym[128];

#ifdef DDB
        db_symstr(locksym, sizeof(locksym), (db_expr_t)(intptr_t)ld->ld_lock,
            DB_STGY_ANY);
        db_symstr(initsym, sizeof(initsym), (db_expr_t)ld->ld_initaddr,
            DB_STGY_PROC);
        db_symstr(lockedsym, sizeof(lockedsym), (db_expr_t)ld->ld_locked,
            DB_STGY_PROC);
        db_symstr(unlockedsym, sizeof(unlockedsym), (db_expr_t)ld->ld_unlocked,
            DB_STGY_PROC);
#else
        snprintf(locksym, sizeof(locksym), "%#018lx",
            (unsigned long)ld->ld_lock);
        snprintf(initsym, sizeof(initsym), "%#018lx",
            (unsigned long)ld->ld_initaddr);
        snprintf(lockedsym, sizeof(lockedsym), "%#018lx",
            (unsigned long)ld->ld_locked);
        snprintf(unlockedsym, sizeof(unlockedsym), "%#018lx",
            (unsigned long)ld->ld_unlocked);
#endif

        (*pr)(
            "lock address : %s\n"
            "type         : %s\n"
            "initialized  : %s",
            locksym, (sleeper ? "sleep/adaptive" : "spin"),
            initsym);

#ifndef _KERNEL
        lockops_t los;
        lo = &los;
        db_read_bytes((db_addr_t)ld->ld_lockops, sizeof(los), (char *)lo);
#endif
        (*pr)("\n"
            "shared holds : %18u exclusive: %18u\n"
            "shares wanted: %18u exclusive: %18u\n"
            "relevant cpu : %18u last held: %18u\n"
            "relevant lwp : %#018lx last held: %#018lx\n"
            "last locked%c : %s\n"
            "unlocked%c    : %s\n",
            (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
            (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
            (unsigned)cpu_index(l->l_cpu), (unsigned)ld->ld_cpu,
            (long)l, (long)ld->ld_lwp,
            ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
            lockedsym,
            ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
            unlockedsym);

#ifdef _KERNEL
        if (lo->lo_dump != NULL)
                (*lo->lo_dump)(ld->ld_lock, pr);

        if (sleeper) {
                turnstile_print(ld->ld_lock, pr);
        }
#endif
}

#ifdef _KERNEL
/*
 * lockdebug_abort1:
 *
 *      An error has been trapped - dump lock info and panic.
 */
static void
lockdebug_abort1(const char *func, size_t line, lockdebug_t *ld, int s,
                 const char *msg, bool dopanic)
{

        /*
         * Don't make the situation worse if the system is already going
         * down in flames.  Once a panic is triggered, lockdebug state
         * becomes stale and cannot be trusted.
         */
        if (atomic_inc_uint_nv(&ld_panic) != 1) {
                __cpu_simple_unlock(&ld->ld_spinlock);
                splx(s);
                return;
        }

        printf("%s error: %s,%zu: %s\n\n", ld->ld_lockops->lo_name,
            func, line, msg);
        lockdebug_dump(curlwp, ld, printf);
        __cpu_simple_unlock(&ld->ld_spinlock);
        splx(s);
        printf("\n");
        if (dopanic)
                panic("LOCKDEBUG: %s error: %s,%zu: %s",
                    ld->ld_lockops->lo_name, func, line, msg);
}

#endif /* _KERNEL */
#endif  /* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *      Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr,
    void (*pr)(const char *, ...) __printflike(1, 2))
{
#ifdef LOCKDEBUG
        lockdebug_t *ld, lds;

        TAILQ_FOREACH(ld, &ld_all, ld_achain) {
                db_read_bytes((db_addr_t)ld, sizeof(lds), __UNVOLATILE(&lds));
                ld = &lds;
                if (ld->ld_lock == NULL)
                        continue;
                if (addr == NULL || ld->ld_lock == addr) {
                        lockdebug_dump(curlwp, ld, pr);
                        if (addr != NULL)
                                return;
                }
        }
        if (addr != NULL) {
                (*pr)("Sorry, no record of a lock with address %p found.\n",
                    addr);
        }
#else
        char sym[128];
        uintptr_t word;

        (*pr)("WARNING: lock print is unreliable without LOCKDEBUG\n");
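        /*
         * Heuristic fallback: for the common lock types the lock word
         * holds the owner LWP pointer, with flag bits stashed in the
         * low ALIGNBYTES bits.
         */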
        db_symstr(sym, sizeof(sym), (db_expr_t)(intptr_t)addr, DB_STGY_ANY);
        db_read_bytes((db_addr_t)addr, sizeof(word), (char *)&word);
        (*pr)("%s: possible owner: %p, bits: 0x%" PRIxPTR "\n", sym,
            (void *)(word & ~(uintptr_t)ALIGNBYTES), word & ALIGNBYTES);
#endif  /* LOCKDEBUG */
}

#ifdef _KERNEL
#ifdef LOCKDEBUG
static void
lockdebug_show_one(lwp_t *l, lockdebug_t *ld, int i,
    void (*pr)(const char *, ...) __printflike(1, 2))
{
        char sym[128];

#ifdef DDB
        db_symstr(sym, sizeof(sym), (db_expr_t)ld->ld_initaddr, DB_STGY_PROC);
#else
        snprintf(sym, sizeof(sym), "%p", (void *)ld->ld_initaddr);
#endif
        (*pr)("* Lock %d (initialized at %s)\n", i++, sym);
        lockdebug_dump(l, ld, pr);
}

static void
lockdebug_show_trace(const void *ptr,
    void (*pr)(const char *, ...) __printflike(1, 2))
{

        db_stack_trace_print((db_expr_t)(intptr_t)ptr, true, 32, "a", pr);
}

static void
lockdebug_show_all_locks_lwp(void (*pr)(const char *, ...) __printflike(1, 2),
    bool show_trace)
{
        struct proc *p;

        LIST_FOREACH(p, &allproc, p_list) {
                struct lwp *l;
                LIST_FOREACH(l, &p->p_lwps, l_sibling) {
                        lockdebug_t *ld;
                        int i = 0;
                        if (TAILQ_EMPTY(&l->l_ld_locks) &&
                            l->l_ld_wanted == NULL) {
                                continue;
                        }
                        (*pr)("\n****** LWP %d.%d (%s) @ %p, l_stat=%d\n",
                            p->p_pid, l->l_lid,
                            l->l_name ? l->l_name : p->p_comm, l, l->l_stat);
                        if (!TAILQ_EMPTY(&l->l_ld_locks)) {
                                (*pr)("\n*** Locks held: \n");
                                TAILQ_FOREACH(ld, &l->l_ld_locks, ld_chain) {
                                        (*pr)("\n");
                                        lockdebug_show_one(l, ld, i++, pr);
                                }
                        } else {
                                (*pr)("\n*** Locks held: none\n");
                        }

                        if (l->l_ld_wanted != NULL) {
                                (*pr)("\n*** Locks wanted: \n\n");
                                lockdebug_show_one(l, l->l_ld_wanted, 0, pr);
                        } else {
                                (*pr)("\n*** Locks wanted: none\n");
                        }
                        if (show_trace) {
                                (*pr)("\n*** Traceback: \n\n");
                                lockdebug_show_trace(l, pr);
                                (*pr)("\n");
                        }
                }
        }
}

static void
lockdebug_show_all_locks_cpu(void (*pr)(const char *, ...) __printflike(1, 2),
    bool show_trace)
{
        lockdebug_t *ld;
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;

        for (CPU_INFO_FOREACH(cii, ci)) {
                int i = 0;
                if (TAILQ_EMPTY(&ci->ci_data.cpu_ld_locks))
                        continue;
                (*pr)("\n******* Locks held on %s:\n", cpu_name(ci));
                TAILQ_FOREACH(ld, &ci->ci_data.cpu_ld_locks, ld_chain) {
                        (*pr)("\n");
#ifdef MULTIPROCESSOR
                        lockdebug_show_one(ci->ci_curlwp, ld, i++, pr);
                        if (show_trace)
                                lockdebug_show_trace(ci->ci_curlwp, pr);
#else
                        lockdebug_show_one(curlwp, ld, i++, pr);
                        if (show_trace)
                                lockdebug_show_trace(curlwp, pr);
#endif
                }
        }
}
#endif  /* LOCKDEBUG */
#endif /* _KERNEL */

#ifdef _KERNEL
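/*
 * lockdebug_show_all_locks:
 *
 *      Handle the DDB 'show all locks' command: dump all held locks,
 *      walking both the per-LWP and the per-CPU lists.  A 't' modifier
 *      adds a stack trace for each holder.
 */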
void
lockdebug_show_all_locks(void (*pr)(const char *, ...) __printflike(1, 2),
    const char *modif)
{
#ifdef LOCKDEBUG
        bool show_trace = false;
        if (modif[0] == 't')
                show_trace = true;

        (*pr)("[Locks tracked through LWPs]\n");
        lockdebug_show_all_locks_lwp(pr, show_trace);
        (*pr)("\n");

        (*pr)("[Locks tracked through CPUs]\n");
        lockdebug_show_all_locks_cpu(pr, show_trace);
        (*pr)("\n");
#else
        (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif  /* LOCKDEBUG */
}

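/*
 * lockdebug_show_lockstats:
 *
 *      Summarize the tracked locks by type for DDB.
 */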
void
lockdebug_show_lockstats(void (*pr)(const char *, ...) __printflike(1, 2))
{
#ifdef LOCKDEBUG
        lockdebug_t *ld;
        void *_ld;
        uint32_t n_null = 0;
        uint32_t n_spin_mutex = 0;
        uint32_t n_adaptive_mutex = 0;
        uint32_t n_rwlock = 0;
        uint32_t n_others = 0;

        RB_TREE_FOREACH(_ld, &ld_rb_tree) {
                ld = _ld;
                if (ld->ld_lock == NULL) {
                        n_null++;
                        continue;
                }
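                /*
                 * Classify by the first letter of the lockops name
                 * (assumed to be e.g. "Mutex" or "RW lock"); sleepable
                 * mutexes are counted as adaptive.
                 */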
                if (ld->ld_lockops->lo_name[0] == 'M') {
                        if (ld->ld_lockops->lo_type == LOCKOPS_SLEEP)
                                n_adaptive_mutex++;
                        else
                                n_spin_mutex++;
                        continue;
                }
                if (ld->ld_lockops->lo_name[0] == 'R') {
                        n_rwlock++;
                        continue;
                }
                n_others++;
        }
        (*pr)(
            "spin mutex: %u\n"
            "adaptive mutex: %u\n"
            "rwlock: %u\n"
            "null locks: %u\n"
            "others: %u\n",
            n_spin_mutex, n_adaptive_mutex, n_rwlock,
            n_null, n_others);
#else
        (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif  /* LOCKDEBUG */
}
#endif /* _KERNEL */
#endif  /* DDB */

#ifdef _KERNEL
/*
 * lockdebug_dismiss:
 *
 *      The system is rebooting, potentially from an unsafe place, so
 *      suppress any further lockdebug aborts.
 */
void
lockdebug_dismiss(void)
{

        atomic_inc_uint_nv(&ld_panic);
}

/*
 * lockdebug_abort:
 *
 *      An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(const char *func, size_t line, const volatile void *lock,
    lockops_t *ops, const char *msg)
{
#ifdef LOCKDEBUG
        lockdebug_t *ld;
        int s;

        s = splhigh();
        if ((ld = lockdebug_lookup(func, line, lock,
                        (uintptr_t) __builtin_return_address(0))) != NULL) {
                lockdebug_abort1(func, line, ld, s, msg, true);
                return;
        }
        splx(s);
#endif  /* LOCKDEBUG */

        /*
         * Don't make the situation worse if the system is already going
         * down in flames.  Once a panic is triggered, lockdebug state
         * becomes stale and cannot be trusted.
         */
        if (atomic_inc_uint_nv(&ld_panic) > 1)
                return;

        char locksym[128];

#ifdef DDB
        db_symstr(locksym, sizeof(locksym), (db_expr_t)(intptr_t)lock,
            DB_STGY_ANY);
#else
        snprintf(locksym, sizeof(locksym), "%#018lx", (unsigned long)lock);
#endif

        printf("%s error: %s,%zu: %s\n\n"
            "lock address : %s\n"
            "current cpu  : %18d\n"
            "current lwp  : %#018lx\n",
            ops->lo_name, func, line, msg, locksym,
            (int)cpu_index(curcpu()), (long)curlwp);
        (*ops->lo_dump)(lock, printf);
        printf("\n");

        panic("lock error: %s: %s,%zu: %s: lock %p cpu %d lwp %p",
            ops->lo_name, func, line, msg, lock, cpu_index(curcpu()), curlwp);
}
#endif /* _KERNEL */
