FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_lock.c


/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and functions used to maintain
 * lock_object structures.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.1/sys/kern/subr_lock.c 262192 2014-02-18 20:27:17Z jhb $");

#include "opt_ddb.h"
#include "opt_mprof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/cpufunc.h>

CTASSERT(LOCK_CLASS_MAX == 15);

struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
        &lock_class_mtx_spin,
        &lock_class_mtx_sleep,
        &lock_class_sx,
        &lock_class_rm,
        &lock_class_rm_sleepable,
        &lock_class_rw,
        &lock_class_lockmgr,
};
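
/*
 * For reference: lock_init() below stores a lock's index in this table
 * in the lo_flags field of its lock_object; the LO_CLASSINDEX() and
 * LOCK_CLASS() macros from <sys/lock.h> (used in db_show_lock() below)
 * recover it, e.g.:
 *
 *        struct lock_class *lc = LOCK_CLASS(lock);
 */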

void
lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
    const char *type, int flags)
{
        int i;

        /* Check for double-init and zero object. */
        KASSERT(!lock_initalized(lock), ("lock \"%s\" %p already initialized",
            name, lock));

        /* Look up lock class to find its index. */
        for (i = 0; i < LOCK_CLASS_MAX; i++)
                if (lock_classes[i] == class) {
                        lock->lo_flags = i << LO_CLASSSHIFT;
                        break;
                }
        KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));

        /* Initialize the lock object. */
        lock->lo_name = name;
        lock->lo_flags |= flags | LO_INITIALIZED;
        LOCK_LOG_INIT(lock, 0);
        WITNESS_INIT(lock, (type != NULL) ? type : name);
}
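
/*
 * Usage sketch (illustrative, not taken from a specific caller): every
 * lock implementation embeds a struct lock_object and initializes it
 * through lock_init().  A sleep mutex, for instance, is set up roughly
 * like:
 *
 *        lock_init(&m->lock_object, &lock_class_mtx_sleep, name, type,
 *            LO_WITNESS | LO_RECURSABLE);
 *
 * with the exact flags depending on the options the consumer passed in.
 */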

void
lock_destroy(struct lock_object *lock)
{

        KASSERT(lock_initalized(lock), ("lock %p is not initialized", lock));
        WITNESS_DESTROY(lock);
        LOCK_LOG_DESTROY(lock, 0);
        lock->lo_flags &= ~LO_INITIALIZED;
}

#ifdef DDB
DB_SHOW_COMMAND(lock, db_show_lock)
{
        struct lock_object *lock;
        struct lock_class *class;

        if (!have_addr)
                return;
        lock = (struct lock_object *)addr;
        if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) {
                db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock));
                return;
        }
        class = LOCK_CLASS(lock);
        db_printf(" class: %s\n", class->lc_name);
        db_printf(" name: %s\n", lock->lo_name);
        class->lc_ddb_show(lock);
}
#endif
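
/*
 * From DDB this is invoked as "show lock <address>", e.g.:
 *
 *        db> show lock 0xffffffff80e21b40
 *
 * (address illustrative).  It prints the class and name and then defers
 * to the class's lc_ddb_show hook for class-specific state.
 */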

#ifdef LOCK_PROFILING

/*
 * One object per-thread for each lock the thread owns.  Tracks individual
 * lock instances.
 */
struct lock_profile_object {
        LIST_ENTRY(lock_profile_object) lpo_link;
        struct lock_object *lpo_obj;
        const char      *lpo_file;
        int             lpo_line;
        uint16_t        lpo_ref;
        uint16_t        lpo_cnt;
        uint64_t        lpo_acqtime;
        uint64_t        lpo_waittime;
        u_int           lpo_contest_locking;
};

/*
 * One lock_prof for each (file, line, lock object) triple.
 */
struct lock_prof {
        SLIST_ENTRY(lock_prof) link;
        struct lock_class *class;
        const char      *file;
        const char      *name;
        int             line;
        int             ticks;
        uintmax_t       cnt_wait_max;
        uintmax_t       cnt_max;
        uintmax_t       cnt_tot;
        uintmax_t       cnt_wait;
        uintmax_t       cnt_cur;
        uintmax_t       cnt_contest_locking;
};
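
/*
 * Example (hypothetical file names): if the same mutex is acquired at
 * kern_foo.c:100 and at kern_foo.c:200, two separate lock_prof records
 * are kept, so contention and hold times are attributed to each
 * acquisition site individually.
 */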

SLIST_HEAD(lphead, lock_prof);

#define LPROF_HASH_SIZE         4096
#define LPROF_HASH_MASK         (LPROF_HASH_SIZE - 1)
#define LPROF_CACHE_SIZE        4096

/*
 * Array of objects and profs for each type of object for each cpu.  Spinlocks
 * are handled separately because a thread may be preempted and acquire a
 * spinlock while in the lock profiling code of a non-spinlock.  In this way
 * we only need a critical section to protect the per-cpu lists.
 */
struct lock_prof_type {
        struct lphead           lpt_lpalloc;
        struct lpohead          lpt_lpoalloc;
        struct lphead           lpt_hash[LPROF_HASH_SIZE];
        struct lock_prof        lpt_prof[LPROF_CACHE_SIZE];
        struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE];
};

struct lock_prof_cpu {
        struct lock_prof_type   lpc_types[2]; /* One for spin one for other. */
};
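
/*
 * lpc_types[] is indexed throughout by a "spin" flag derived from the
 * lock class, as in the lookup functions below:
 *
 *        spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
 *        type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
 *
 * so index 0 holds state for sleepable locks and index 1 for spin locks.
 */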

struct lock_prof_cpu *lp_cpu[MAXCPU];

volatile int lock_prof_enable = 0;
static volatile int lock_prof_resetting;

#define LPROF_SBUF_SIZE         256

static int lock_prof_rejected;
static int lock_prof_skipspin;
static int lock_prof_skipcount;

#ifndef USE_CPU_NANOSECONDS
uint64_t
nanoseconds(void)
{
        struct bintime bt;
        uint64_t ns;

        binuptime(&bt);
        /*
         * From bintime2timespec: bt.frac is a 64-bit binary fraction of
         * a second (units of 2^-64 s), so the exact conversion would be
         * frac * 10^9 / 2^64.  Multiplying only the top 32 bits of frac
         * keeps the product within 64 bits; the truncated low bits cost
         * less than about 1.25 ns of precision.
         */
        ns = bt.sec * (uint64_t)1000000000;
        ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
        return (ns);
}
#endif

static void
lock_prof_init_type(struct lock_prof_type *type)
{
        int i;

        SLIST_INIT(&type->lpt_lpalloc);
        LIST_INIT(&type->lpt_lpoalloc);
        for (i = 0; i < LPROF_CACHE_SIZE; i++) {
                SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i],
                    link);
                LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i],
                    lpo_link);
        }
}

static void
lock_prof_init(void *arg)
{
        int cpu;

        for (cpu = 0; cpu <= mp_maxid; cpu++) {
                lp_cpu[cpu] = malloc(sizeof(*lp_cpu[cpu]), M_DEVBUF,
                    M_WAITOK | M_ZERO);
                lock_prof_init_type(&lp_cpu[cpu]->lpc_types[0]);
                lock_prof_init_type(&lp_cpu[cpu]->lpc_types[1]);
        }
}
SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);

static void
lock_prof_reset_wait(void)
{

        /*
         * Spin relinquishing our cpu so that quiesce_all_cpus may
         * complete.
         */
        while (lock_prof_resetting)
                sched_relinquish(curthread);
}

static void
lock_prof_reset(void)
{
        struct lock_prof_cpu *lpc;
        int enabled, i, cpu;

        /*
         * We race not only with lock acquire and release but also with
         * thread exit.  Exiting threads must observe resetting set before
         * enabled is cleared, or a thread could skip unlinking a lock from
         * its per-thread list (because profiling appears disabled) and yet
         * not wait for reset() to remove it below.
         */
        atomic_store_rel_int(&lock_prof_resetting, 1);
        enabled = lock_prof_enable;
        lock_prof_enable = 0;
        quiesce_all_cpus("profreset", 0);
        /*
         * Some objects may have migrated between CPUs.  Clear all links
         * before we zero the structures.  Some items may still be linked
         * into per-thread lists as well.
         */
        for (cpu = 0; cpu <= mp_maxid; cpu++) {
                lpc = lp_cpu[cpu];
                for (i = 0; i < LPROF_CACHE_SIZE; i++) {
                        LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link);
                        LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link);
                }
        }
        for (cpu = 0; cpu <= mp_maxid; cpu++) {
                lpc = lp_cpu[cpu];
                bzero(lpc, sizeof(*lpc));
                lock_prof_init_type(&lpc->lpc_types[0]);
                lock_prof_init_type(&lpc->lpc_types[1]);
        }
        atomic_store_rel_int(&lock_prof_resetting, 0);
        lock_prof_enable = enabled;
}

static void
lock_prof_output(struct lock_prof *lp, struct sbuf *sb)
{
        const char *p;

        /* Skip any leading "../" components in the recorded file name. */
        for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3);
        /*
         * Times are recorded in nanoseconds and printed in microseconds;
         * the cnt_hold column is always printed as 0 here.
         */
        sbuf_printf(sb,
            "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n",
            lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000,
            lp->cnt_wait / 1000, lp->cnt_cur,
            lp->cnt_cur == 0 ? (uintmax_t)0 :
            lp->cnt_tot / (lp->cnt_cur * 1000),
            lp->cnt_cur == 0 ? (uintmax_t)0 :
            lp->cnt_wait / (lp->cnt_cur * 1000),
            (uintmax_t)0, lp->cnt_contest_locking,
            p, lp->line, lp->class->lc_name, lp->name);
}

static void
lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash,
    int spin, int t)
{
        struct lock_prof_type *type;
        struct lock_prof *l;
        int cpu;

        dst->file = match->file;
        dst->line = match->line;
        dst->class = match->class;
        dst->name = match->name;

        for (cpu = 0; cpu <= mp_maxid; cpu++) {
                if (lp_cpu[cpu] == NULL)
                        continue;
                type = &lp_cpu[cpu]->lpc_types[spin];
                SLIST_FOREACH(l, &type->lpt_hash[hash], link) {
                        if (l->ticks == t)
                                continue;
                        if (l->file != match->file || l->line != match->line ||
                            l->name != match->name)
                                continue;
                        /*
                         * Tag this record with the current ticks value so
                         * later passes know it has already been folded in.
                         */
                        l->ticks = t;
                        if (l->cnt_max > dst->cnt_max)
                                dst->cnt_max = l->cnt_max;
                        if (l->cnt_wait_max > dst->cnt_wait_max)
                                dst->cnt_wait_max = l->cnt_wait_max;
                        dst->cnt_tot += l->cnt_tot;
                        dst->cnt_wait += l->cnt_wait;
                        dst->cnt_cur += l->cnt_cur;
                        dst->cnt_contest_locking += l->cnt_contest_locking;
                }
        }
}

static void
lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin,
    int t)
{
        struct lock_prof *l;
        int i;

        for (i = 0; i < LPROF_HASH_SIZE; ++i) {
                SLIST_FOREACH(l, &type->lpt_hash[i], link) {
                        struct lock_prof lp = {};

                        if (l->ticks == t)
                                continue;
                        lock_prof_sum(l, &lp, i, spin, t);
                        lock_prof_output(&lp, sb);
                }
        }
}

static int
dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
        struct sbuf *sb;
        int error, cpu, t;
        int enabled;

        error = sysctl_wire_old_buffer(req, 0);
        if (error != 0)
                return (error);
        sb = sbuf_new_for_sysctl(NULL, NULL, LPROF_SBUF_SIZE, req);
        sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n",
            "max", "wait_max", "total", "wait_total", "count", "avg",
            "wait_avg", "cnt_hold", "cnt_lock", "name");
        enabled = lock_prof_enable;
        lock_prof_enable = 0;
        quiesce_all_cpus("profstat", 0);
        t = ticks;
        for (cpu = 0; cpu <= mp_maxid; cpu++) {
                if (lp_cpu[cpu] == NULL)
                        continue;
                lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t);
                lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t);
        }
        lock_prof_enable = enabled;

        error = sbuf_finish(sb);
        /* Output a trailing NUL. */
        if (error == 0)
                error = SYSCTL_OUT(req, "", 1);
        sbuf_delete(sb);
        return (error);
}
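
/*
 * Example of the resulting output (values invented for illustration):
 *
 *      max  wait_max   total  wait_total  count  avg wait_avg cnt_hold cnt_lock name
 *       95       312   10433        4141   1020   10        4        0       17 kern/vfs_subr.c:2086 (sleep mutex:vnode interlock)
 */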

static int
enable_lock_prof(SYSCTL_HANDLER_ARGS)
{
        int error, v;

        v = lock_prof_enable;
        error = sysctl_handle_int(oidp, &v, v, req);
        if (error)
                return (error);
        if (req->newptr == NULL)
                return (error);
        if (v == lock_prof_enable)
                return (0);
        if (v == 1)
                lock_prof_reset();
        lock_prof_enable = !!v;

        return (0);
}

static int
reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
        int error, v;

        v = 0;
        error = sysctl_handle_int(oidp, &v, 0, req);
        if (error)
                return (error);
        if (req->newptr == NULL)
                return (error);
        if (v == 0)
                return (0);
        lock_prof_reset();

        return (0);
}

static struct lock_prof *
lock_profile_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
        const char *unknown = "(unknown)";
        struct lock_prof_type *type;
        struct lock_prof *lp;
        struct lphead *head;
        const char *p;
        u_int hash;

        p = file;
        if (p == NULL || *p == '\0')
                p = unknown;
        /*
         * Hash and compare the file and name by pointer value: both
         * normally point at string literals (__FILE__, the lock name),
         * so a given call site always passes the same pointer.
         */
        hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line;
        hash &= LPROF_HASH_MASK;
        type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
        head = &type->lpt_hash[hash];
        SLIST_FOREACH(lp, head, link) {
                if (lp->line == line && lp->file == p &&
                    lp->name == lo->lo_name)
                        return (lp);
        }
        lp = SLIST_FIRST(&type->lpt_lpalloc);
        if (lp == NULL) {
                lock_prof_rejected++;
                return (lp);
        }
        SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link);
        lp->file = p;
        lp->line = line;
        lp->class = LOCK_CLASS(lo);
        lp->name = lo->lo_name;
        SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link);
        return (lp);
}

static struct lock_profile_object *
lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
        struct lock_profile_object *l;
        struct lock_prof_type *type;
        struct lpohead *head;

        head = &curthread->td_lprof[spin];
        LIST_FOREACH(l, head, lpo_link)
                if (l->lpo_obj == lo && l->lpo_file == file &&
                    l->lpo_line == line)
                        return (l);
        type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
        l = LIST_FIRST(&type->lpt_lpoalloc);
        if (l == NULL) {
                lock_prof_rejected++;
                return (NULL);
        }
        LIST_REMOVE(l, lpo_link);
        l->lpo_obj = lo;
        l->lpo_file = file;
        l->lpo_line = line;
        l->lpo_cnt = 0;
        LIST_INSERT_HEAD(head, l, lpo_link);

        return (l);
}

void
lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
    uint64_t waittime, const char *file, int line)
{
        static int lock_prof_count;
        struct lock_profile_object *l;
        int spin;

        if (SCHEDULER_STOPPED())
                return;

        /* don't reset the timer when/if recursing */
        if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
                return;
        if (lock_prof_skipcount &&
            (++lock_prof_count % lock_prof_skipcount) != 0)
                return;
        spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
        if (spin && lock_prof_skipspin == 1)
                return;
        critical_enter();
        /* Recheck enabled now that we're in a critical section. */
        if (lock_prof_enable == 0)
                goto out;
        l = lock_profile_object_lookup(lo, spin, file, line);
        if (l == NULL)
                goto out;
        l->lpo_cnt++;
        if (++l->lpo_ref > 1)
                goto out;
        l->lpo_contest_locking = contested;
        l->lpo_acqtime = nanoseconds();
        if (waittime && (l->lpo_acqtime > waittime))
                l->lpo_waittime = l->lpo_acqtime - waittime;
        else
                l->lpo_waittime = 0;
out:
        critical_exit();
}
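
/*
 * Caller-side sketch (illustrative; see the individual lock
 * implementations for the real sequence): on a contended acquire the
 * lock code first notes the wait start, then reports the final success:
 *
 *        int contested = 0;
 *        uint64_t waittime = 0;
 *        ...
 *        lock_profile_obtain_lock_failed(&m->lock_object, &contested,
 *            &waittime);
 *        ...spin or sleep until the lock is obtained...
 *        lock_profile_obtain_lock_success(&m->lock_object, contested,
 *            waittime, file, line);
 */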

void
lock_profile_thread_exit(struct thread *td)
{
#ifdef INVARIANTS
        struct lock_profile_object *l;

        MPASS(curthread->td_critnest == 0);
#endif
        /*
         * If lock profiling was disabled we have to wait for reset to
         * clear our pointers before we can exit safely.
         */
        lock_prof_reset_wait();
#ifdef INVARIANTS
        LIST_FOREACH(l, &td->td_lprof[0], lpo_link)
                printf("thread still holds lock acquired at %s:%d\n",
                    l->lpo_file, l->lpo_line);
        LIST_FOREACH(l, &td->td_lprof[1], lpo_link)
                printf("thread still holds lock acquired at %s:%d\n",
                    l->lpo_file, l->lpo_line);
#endif
        MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL);
        MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL);
}

void
lock_profile_release_lock(struct lock_object *lo)
{
        struct lock_profile_object *l;
        struct lock_prof_type *type;
        struct lock_prof *lp;
        uint64_t curtime, holdtime;
        struct lpohead *head;
        int spin;

        if (SCHEDULER_STOPPED())
                return;
        if (lo->lo_flags & LO_NOPROFILE)
                return;
        spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
        head = &curthread->td_lprof[spin];
        if (LIST_FIRST(head) == NULL)
                return;
        critical_enter();
        /* Recheck enabled now that we're in a critical section. */
        if (lock_prof_enable == 0 && lock_prof_resetting == 1)
                goto out;
        /*
         * If lock profiling is not enabled we still want to remove the
         * lpo from our queue.
         */
        LIST_FOREACH(l, head, lpo_link)
                if (l->lpo_obj == lo)
                        break;
        if (l == NULL)
                goto out;
        if (--l->lpo_ref > 0)
                goto out;
        lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line);
        if (lp == NULL)
                goto release;
        curtime = nanoseconds();
        if (curtime < l->lpo_acqtime)
                goto release;
        holdtime = curtime - l->lpo_acqtime;

        /*
         * Record if the lock has been held longer now than ever
         * before.
         */
        if (holdtime > lp->cnt_max)
                lp->cnt_max = holdtime;
        if (l->lpo_waittime > lp->cnt_wait_max)
                lp->cnt_wait_max = l->lpo_waittime;
        lp->cnt_tot += holdtime;
        lp->cnt_wait += l->lpo_waittime;
        lp->cnt_contest_locking += l->lpo_contest_locking;
        lp->cnt_cur += l->lpo_cnt;
release:
        LIST_REMOVE(l, lpo_link);
        type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
        LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link);
out:
        critical_exit();
}

static SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging");
static SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL,
    "lock profiling");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW,
    &lock_prof_skipspin, 0, "Skip profiling on spinlocks.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW,
    &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &lock_prof_rejected, 0, "Number of rejected profiling records");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_lock_prof, "I", "Enable lock profiling");
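
/*
 * Typical workflow from userland (illustrative; needs a kernel built
 * with "options LOCK_PROFILING"):
 *
 *        sysctl debug.lock.prof.enable=1
 *        # ...run the workload of interest...
 *        sysctl debug.lock.prof.enable=0
 *        sysctl debug.lock.prof.stats
 *        sysctl debug.lock.prof.reset=1
 */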

#endif  /* LOCK_PROFILING */
