FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_rmlock.c


/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

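/*
 * Illustrative usage sketch (hypothetical caller, not part of this file;
 * see rmlock(9) for the authoritative interface).  Read sections are
 * expected to be frequent and short, write sections rare and expensive:
 *
 *      static struct rmlock data_lock;
 *      struct rm_priotracker tracker;  (stack-allocated, one per reader)
 *
 *      rm_init(&data_lock, "data_lock");
 *
 *      rm_rlock(&data_lock, &tracker); (cheap per-CPU fast path)
 *      ...read shared data...
 *      rm_runlock(&data_lock, &tracker);
 *
 *      rm_wlock(&data_lock);           (revokes all CPUs' read tokens)
 *      ...modify shared data...
 *      rm_wunlock(&data_lock);
 */
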
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.4/sys/kern/kern_rmlock.c 323872 2017-09-21 20:13:03Z marius $");

#include "opt_ddb.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * A cookie to mark destroyed rmlocks.  This is stored in the head of
 * rm_activeReaders.
 */
#define RM_DESTROYED    ((void *)0xdead)

#define rm_destroyed(rm)                                                \
        (LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)

#define RMPF_ONQUEUE    1
#define RMPF_SIGNAL     2

#ifndef INVARIANTS
#define _rm_assert(c, what, file, line)
#endif

static void     assert_rm(const struct lock_object *lock, int what);
#ifdef DDB
static void     db_show_rm(const struct lock_object *lock);
#endif
static void     lock_rm(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int      owner_rm(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rm(struct lock_object *lock);

struct lock_class lock_class_rm = {
        .lc_name = "rm",
        .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
        .lc_assert = assert_rm,
#ifdef DDB
        .lc_ddb_show = db_show_rm,
#endif
        .lc_lock = lock_rm,
        .lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_rm,
#endif
};

struct lock_class lock_class_rm_sleepable = {
        .lc_name = "sleepable rm",
        .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
        .lc_assert = assert_rm,
#ifdef DDB
        .lc_ddb_show = db_show_rm,
#endif
        .lc_lock = lock_rm,
        .lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_rm,
#endif
};

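/*
 * Lock-class glue used by the generic lock(9) framework (e.g. by
 * condition variables that must drop and reacquire the lock).  The
 * 'how' value encodes the mode: 0 means write-locked, any other value
 * is a pointer to the reader's rm_priotracker.
 */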
static void
assert_rm(const struct lock_object *lock, int what)
{

        rm_assert((const struct rmlock *)lock, what);
}

static void
lock_rm(struct lock_object *lock, uintptr_t how)
{
        struct rmlock *rm;
        struct rm_priotracker *tracker;

        rm = (struct rmlock *)lock;
        if (how == 0)
                rm_wlock(rm);
        else {
                tracker = (struct rm_priotracker *)how;
                rm_rlock(rm, tracker);
        }
}

static uintptr_t
unlock_rm(struct lock_object *lock)
{
        struct thread *td;
        struct pcpu *pc;
        struct rmlock *rm;
        struct rm_queue *queue;
        struct rm_priotracker *tracker;
        uintptr_t how;

        rm = (struct rmlock *)lock;
        tracker = NULL;
        how = 0;
        rm_assert(rm, RA_LOCKED | RA_NOTRECURSED);
        if (rm_wowned(rm))
                rm_wunlock(rm);
        else {
                /*
                 * Find the right rm_priotracker structure for curthread.
                 * Its uniqueness is guaranteed because we already asserted
                 * that the lock was not recursively acquired.
                 */
                critical_enter();
                td = curthread;
                pc = pcpu_find(curcpu);
                for (queue = pc->pc_rm_queue.rmq_next;
                    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
                        tracker = (struct rm_priotracker *)queue;
                        if ((tracker->rmp_rmlock == rm) &&
                            (tracker->rmp_thread == td)) {
                                how = (uintptr_t)tracker;
                                break;
                        }
                }
                KASSERT(tracker != NULL,
                    ("rm_priotracker is NULL when lock held in read mode"));
                critical_exit();
                rm_runlock(rm, tracker);
        }
        return (how);
}

#ifdef KDTRACE_HOOKS
static int
owner_rm(const struct lock_object *lock, struct thread **owner)
{
        const struct rmlock *rm;
        struct lock_class *lc;

        rm = (const struct rmlock *)lock;
        lc = LOCK_CLASS(&rm->rm_wlock_object);
        return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
#endif

static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
static inline void
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
        struct rm_queue *next;

        /* Initialize all tracker pointers. */
        tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
        next = pc->pc_rm_queue.rmq_next;
        tracker->rmp_cpuQueue.rmq_next = next;

        /* rmq_prev is not used during forward traversal. */
        next->rmq_prev = &tracker->rmp_cpuQueue;

        /* Update pointer to first element. */
        pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}

/*
 * Return a count of the number of trackers the thread 'td' already
 * has on this CPU for the lock 'rm'.
 */
static int
rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
    const struct thread *td)
{
        struct rm_queue *queue;
        struct rm_priotracker *tracker;
        int count;

        count = 0;
        for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
            queue = queue->rmq_next) {
                tracker = (struct rm_priotracker *)queue;
                if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
                        count++;
        }
        return (count);
}

static inline void
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
        struct rm_queue *next, *prev;

        next = tracker->rmp_cpuQueue.rmq_next;
        prev = tracker->rmp_cpuQueue.rmq_prev;

        /* Not used during forward traversal. */
        next->rmq_prev = prev;

        /* Remove from list. */
        prev->rmq_next = next;
}

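/*
 * IPI handler: runs on each CPU whose read token is being revoked by a
 * writer in _rm_wlock().  Each tracker for this lock found on the local
 * per-cpu queue that is not already flagged is marked RMPF_ONQUEUE and
 * put on the lock's rm_activeReaders list, so the writer can wait for
 * those readers to drain.
 */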
static void
rm_cleanIPI(void *arg)
{
        struct pcpu *pc;
        struct rmlock *rm = arg;
        struct rm_priotracker *tracker;
        struct rm_queue *queue;

        pc = pcpu_find(curcpu);
        for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
            queue = queue->rmq_next) {
                tracker = (struct rm_priotracker *)queue;
                if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
                        tracker->rmp_flags = RMPF_ONQUEUE;
                        mtx_lock_spin(&rm_spinlock);
                        LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
                            rmp_qentry);
                        mtx_unlock_spin(&rm_spinlock);
                }
        }
}

void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
        struct lock_class *lc;
        int liflags, xflags;

        liflags = 0;
        if (!(opts & RM_NOWITNESS))
                liflags |= LO_WITNESS;
        if (opts & RM_RECURSE)
                liflags |= LO_RECURSABLE;
        if (opts & RM_NEW)
                liflags |= LO_NEW;
        rm->rm_writecpus = all_cpus;
        LIST_INIT(&rm->rm_activeReaders);
        if (opts & RM_SLEEPABLE) {
                liflags |= LO_SLEEPABLE;
                lc = &lock_class_rm_sleepable;
                xflags = (opts & RM_NEW ? SX_NEW : 0);
                sx_init_flags(&rm->rm_lock_sx, "rmlock_sx",
                    xflags | SX_NOWITNESS);
        } else {
                lc = &lock_class_rm;
                xflags = (opts & RM_NEW ? MTX_NEW : 0);
                mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx",
                    xflags | MTX_NOWITNESS);
        }
        lock_init(&rm->lock_object, lc, name, NULL, liflags);
}

void
rm_init(struct rmlock *rm, const char *name)
{

        rm_init_flags(rm, name, 0);
}

void
rm_destroy(struct rmlock *rm)
{

        rm_assert(rm, RA_UNLOCKED);
        LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                sx_destroy(&rm->rm_lock_sx);
        else
                mtx_destroy(&rm->rm_lock_mtx);
        lock_destroy(&rm->lock_object);
}

int
rm_wowned(const struct rmlock *rm)
{

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                return (sx_xlocked(&rm->rm_lock_sx));
        else
                return (mtx_owned(&rm->rm_lock_mtx));
}

void
rm_sysinit(void *arg)
{
        struct rm_args *args = arg;

        rm_init(args->ra_rm, args->ra_desc);
}

void
rm_sysinit_flags(void *arg)
{
        struct rm_args_flags *args = arg;

        rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts);
}

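/*
 * Slow path for read acquisition: taken when this CPU holds no read
 * token for 'rm' or a preemption is pending.  Unless an IPI granted
 * the lock in the meantime, or the lock is recursive and already held
 * by this thread, fall back to the internal mutex/sx lock, reclaim a
 * read token for this CPU, and re-register the tracker.  Returns 0
 * only when a trylock attempt fails.
 */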
static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
        struct pcpu *pc;

        critical_enter();
        pc = pcpu_find(curcpu);

        /* Check if we just need to do a proper critical_exit. */
        if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
                critical_exit();
                return (1);
        }

        /* Remove our tracker from the per-cpu list. */
        rm_tracker_remove(pc, tracker);

        /* Check to see if the IPI granted us the lock after all. */
        if (tracker->rmp_flags) {
                /* Just add back tracker - we hold the lock. */
                rm_tracker_add(pc, tracker);
                critical_exit();
                return (1);
        }

        /*
         * Allow a reader to acquire the lock even while a writer is
         * blocked, provided the lock is recursive and the reader
         * already holds it.
         */
        if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
                /*
                 * Just grant the lock if this thread already has a tracker
                 * for this lock on the per-cpu queue.
                 */
                if (rm_trackers_present(pc, rm, curthread) != 0) {
                        mtx_lock_spin(&rm_spinlock);
                        LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
                            rmp_qentry);
                        tracker->rmp_flags = RMPF_ONQUEUE;
                        mtx_unlock_spin(&rm_spinlock);
                        rm_tracker_add(pc, tracker);
                        critical_exit();
                        return (1);
                }
        }

        sched_unpin();
        critical_exit();

        if (trylock) {
                if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
                        if (!sx_try_xlock(&rm->rm_lock_sx))
                                return (0);
                } else {
                        if (!mtx_trylock(&rm->rm_lock_mtx))
                                return (0);
                }
        } else {
                if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
                        THREAD_SLEEPING_OK();
                        sx_xlock(&rm->rm_lock_sx);
                        THREAD_NO_SLEEPING();
                } else
                        mtx_lock(&rm->rm_lock_mtx);
        }

        critical_enter();
        pc = pcpu_find(curcpu);
        CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
        rm_tracker_add(pc, tracker);
        sched_pin();
        critical_exit();

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                sx_xunlock(&rm->rm_lock_sx);
        else
                mtx_unlock(&rm->rm_lock_mtx);

        return (1);
}

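/*
 * Fast path for read acquisition: register the tracker on the per-cpu
 * queue and pin the thread to its CPU, all inside a critical section
 * open-coded via td_critnest.  If this CPU already holds a read token
 * and no preemption is pending, the lock is acquired without taking
 * any other lock.
 */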
int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
        struct thread *td = curthread;
        struct pcpu *pc;

        if (SCHEDULER_STOPPED())
                return (1);

        tracker->rmp_flags  = 0;
        tracker->rmp_thread = td;
        tracker->rmp_rmlock = rm;

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                THREAD_NO_SLEEPING();

        td->td_critnest++;      /* critical_enter(); */

        __compiler_membar();

        pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */

        rm_tracker_add(pc, tracker);

        sched_pin();

        __compiler_membar();

        td->td_critnest--;

        /*
         * Fast path to combine two common conditions into a single
         * conditional jump.
         */
        if (0 == (td->td_owepreempt |
            CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
                return (1);

        /* We do not have a read token and need to acquire one. */
        return (_rm_rlock_hard(rm, tracker, trylock));
}

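/*
 * Slow path for read release: performs any deferred preemption and, if
 * an IPI put this tracker on rm_activeReaders, takes it back off the
 * list, waking up a writer waiting on the turnstile when this reader
 * was signalled as the one being waited on.
 */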
static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

        if (td->td_owepreempt) {
                td->td_critnest++;
                critical_exit();
        }

        if (!tracker->rmp_flags)
                return;

        mtx_lock_spin(&rm_spinlock);
        LIST_REMOVE(tracker, rmp_qentry);

        if (tracker->rmp_flags & RMPF_SIGNAL) {
                struct rmlock *rm;
                struct turnstile *ts;

                rm = tracker->rmp_rmlock;

                turnstile_chain_lock(&rm->lock_object);
                mtx_unlock_spin(&rm_spinlock);

                ts = turnstile_lookup(&rm->lock_object);

                turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
                turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
                turnstile_chain_unlock(&rm->lock_object);
        } else
                mtx_unlock_spin(&rm_spinlock);
}

void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
        struct pcpu *pc;
        struct thread *td = tracker->rmp_thread;

        if (SCHEDULER_STOPPED())
                return;

        td->td_critnest++;      /* critical_enter(); */
        pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
        rm_tracker_remove(pc, tracker);
        td->td_critnest--;
        sched_unpin();

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                THREAD_SLEEPING_OK();

        if (0 == (td->td_owepreempt | tracker->rmp_flags))
                return;

        _rm_unlock_hard(td, tracker);
}

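/*
 * Write acquisition: take the internal mutex/sx lock, revoke every
 * CPU's read token, and IPI the CPUs that held one so that active
 * readers park themselves on rm_activeReaders; then sleep on the
 * turnstile until the last of those readers drains.
 */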
void
_rm_wlock(struct rmlock *rm)
{
        struct rm_priotracker *prio;
        struct turnstile *ts;
        cpuset_t readcpus;

        if (SCHEDULER_STOPPED())
                return;

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                sx_xlock(&rm->rm_lock_sx);
        else
                mtx_lock(&rm->rm_lock_mtx);

        if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
                /* Get all read tokens back. */
                readcpus = all_cpus;
                CPU_NAND(&readcpus, &rm->rm_writecpus);
                rm->rm_writecpus = all_cpus;

                /*
                 * Assumes rm->rm_writecpus update is visible on other CPUs
                 * before rm_cleanIPI is called.
                 */
#ifdef SMP
                smp_rendezvous_cpus(readcpus,
                    smp_no_rendevous_barrier,
                    rm_cleanIPI,
                    smp_no_rendevous_barrier,
                    rm);

#else
                rm_cleanIPI(rm);
#endif

                mtx_lock_spin(&rm_spinlock);
                while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
                        ts = turnstile_trywait(&rm->lock_object);
                        prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
                        mtx_unlock_spin(&rm_spinlock);
                        turnstile_wait(ts, prio->rmp_thread,
                            TS_EXCLUSIVE_QUEUE);
                        mtx_lock_spin(&rm_spinlock);
                }
                mtx_unlock_spin(&rm_spinlock);
        }
}

void
_rm_wunlock(struct rmlock *rm)
{

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                sx_xunlock(&rm->rm_lock_sx);
        else
                mtx_unlock(&rm->rm_lock_mtx);
}

#if LOCK_DEBUG > 0

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
            curthread, rm->lock_object.lo_name, file, line));
        KASSERT(!rm_destroyed(rm),
            ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
        _rm_assert(rm, RA_UNLOCKED, file, line);

        WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line, NULL);

        _rm_wlock(rm);

        LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

        WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

        curthread->td_locks++;
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(!rm_destroyed(rm),
            ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
        _rm_assert(rm, RA_WLOCKED, file, line);
        WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
        _rm_wunlock(rm);
        curthread->td_locks--;
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return (1);

#ifdef INVARIANTS
        if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
                critical_enter();
                KASSERT(rm_trackers_present(pcpu_find(curcpu), rm,
                    curthread) == 0,
                    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
                    rm->lock_object.lo_name, file, line));
                critical_exit();
        }
#endif
        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
            curthread, rm->lock_object.lo_name, file, line));
        KASSERT(!rm_destroyed(rm),
            ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
        if (!trylock) {
                KASSERT(!rm_wowned(rm),
                    ("rm_rlock: wlock already held for %s @ %s:%d",
                    rm->lock_object.lo_name, file, line));
                WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
                    NULL);
        }

        if (_rm_rlock(rm, tracker, trylock)) {
                if (trylock)
                        LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
                            line);
                else
                        LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
                            line);
                WITNESS_LOCK(&rm->lock_object, 0, file, line);

                curthread->td_locks++;

                return (1);
        } else if (trylock)
                LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

        return (0);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(!rm_destroyed(rm),
            ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
        _rm_assert(rm, RA_RLOCKED, file, line);
        WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
        LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
        _rm_runlock(rm, tracker);
        curthread->td_locks--;
}

#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

        _rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

        _rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

        return (_rm_rlock(rm, tracker, trylock));
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        _rm_runlock(rm, tracker);
}

#endif

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rm_assert
#endif

/*
 * Note that this does not need to use witness_assert() for read lock
 * assertions since an exact count of read locks held by this thread
 * is computable.
 */
void
_rm_assert(const struct rmlock *rm, int what, const char *file, int line)
{
        int count;

        if (panicstr != NULL)
                return;
        switch (what) {
        case RA_LOCKED:
        case RA_LOCKED | RA_RECURSED:
        case RA_LOCKED | RA_NOTRECURSED:
        case RA_RLOCKED:
        case RA_RLOCKED | RA_RECURSED:
        case RA_RLOCKED | RA_NOTRECURSED:
                /*
                 * Handle the write-locked case.  Unlike other
                 * primitives, writers can never recurse.
                 */
                if (rm_wowned(rm)) {
                        if (what & RA_RLOCKED)
                                panic("Lock %s exclusively locked @ %s:%d\n",
                                    rm->lock_object.lo_name, file, line);
                        if (what & RA_RECURSED)
                                panic("Lock %s not recursed @ %s:%d\n",
                                    rm->lock_object.lo_name, file, line);
                        break;
                }

                critical_enter();
                count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
                critical_exit();

                if (count == 0)
                        panic("Lock %s not %slocked @ %s:%d\n",
                            rm->lock_object.lo_name, (what & RA_RLOCKED) ?
                            "read " : "", file, line);
                if (count > 1) {
                        if (what & RA_NOTRECURSED)
                                panic("Lock %s recursed @ %s:%d\n",
                                    rm->lock_object.lo_name, file, line);
                } else if (what & RA_RECURSED)
                        panic("Lock %s not recursed @ %s:%d\n",
                            rm->lock_object.lo_name, file, line);
                break;
        case RA_WLOCKED:
                if (!rm_wowned(rm))
                        panic("Lock %s not exclusively locked @ %s:%d\n",
                            rm->lock_object.lo_name, file, line);
                break;
        case RA_UNLOCKED:
                if (rm_wowned(rm))
                        panic("Lock %s exclusively locked @ %s:%d\n",
                            rm->lock_object.lo_name, file, line);

                critical_enter();
                count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
                critical_exit();

                if (count != 0)
                        panic("Lock %s read locked @ %s:%d\n",
                            rm->lock_object.lo_name, file, line);
                break;
        default:
                panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
                    line);
        }
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
static void
print_tracker(struct rm_priotracker *tr)
{
        struct thread *td;

        td = tr->rmp_thread;
        db_printf("   thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid,
            td->td_proc->p_pid, td->td_name);
        if (tr->rmp_flags & RMPF_ONQUEUE) {
                db_printf("ONQUEUE");
                if (tr->rmp_flags & RMPF_SIGNAL)
                        db_printf(",SIGNAL");
        } else
                db_printf("0");
        db_printf("}\n");
}

static void
db_show_rm(const struct lock_object *lock)
{
        struct rm_priotracker *tr;
        struct rm_queue *queue;
        const struct rmlock *rm;
        struct lock_class *lc;
        struct pcpu *pc;

        rm = (const struct rmlock *)lock;
        db_printf(" writecpus: ");
        ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
        db_printf("\n");
        db_printf(" per-CPU readers:\n");
        STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
                for (queue = pc->pc_rm_queue.rmq_next;
                    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
                        tr = (struct rm_priotracker *)queue;
                        if (tr->rmp_rmlock == rm)
                                print_tracker(tr);
                }
        db_printf(" active readers:\n");
        LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
                print_tracker(tr);
        lc = LOCK_CLASS(&rm->rm_wlock_object);
        db_printf("Backing write-lock (%s):\n", lc->lc_name);
        lc->lc_ddb_show(&rm->rm_wlock_object);
}
#endif
