The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_rmlock.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 3. Neither the name of the author nor the names of any co-contributors
   14  *    may be used to endorse or promote products derived from this software
   15  *    without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 /*
   31  * Machine independent bits of reader/writer lock implementation.
   32  */
   33 
   34 #include <sys/cdefs.h>
   35 __FBSDID("$FreeBSD$");
   36 
   37 #include "opt_ddb.h"
   38 #include "opt_kdtrace.h"
   39 
   40 #include <sys/param.h>
   41 #include <sys/systm.h>
   42 
   43 #include <sys/kernel.h>
   44 #include <sys/kdb.h>
   45 #include <sys/ktr.h>
   46 #include <sys/lock.h>
   47 #include <sys/mutex.h>
   48 #include <sys/proc.h>
   49 #include <sys/rmlock.h>
   50 #include <sys/sched.h>
   51 #include <sys/smp.h>
   52 #include <sys/turnstile.h>
   53 #include <sys/lock_profile.h>
   54 #include <machine/cpu.h>
   55 
   56 #ifdef DDB
   57 #include <ddb/ddb.h>
   58 #endif
   59 
   60 /*
   61  * A cookie to mark destroyed rmlocks.  This is stored in the head of
   62  * rm_activeReaders.
   63  */
   64 #define RM_DESTROYED    ((void *)0xdead)
   65 
   66 #define rm_destroyed(rm)                                                \
   67         (LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)
   68 
/* Tracker state flags (rmp_flags). */
#define RMPF_ONQUEUE    1	/* Tracker is linked on rm_activeReaders. */
#define RMPF_SIGNAL     2	/* A writer is waiting; wake it on unlock. */

/* Without INVARIANTS, lock assertions compile away to nothing. */
#ifndef INVARIANTS
#define _rm_assert(c, what, file, line)
#endif

static void     assert_rm(struct lock_object *lock, int what);
#ifdef DDB
static void     db_show_rm(struct lock_object *lock);
#endif
static void     lock_rm(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int      owner_rm(struct lock_object *lock, struct thread **owner);
#endif
static int      unlock_rm(struct lock_object *lock);
   85 
/* Lock class for non-sleepable rm locks (backed by a mutex). */
struct lock_class lock_class_rm = {
        .lc_name = "rm",
        .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
        .lc_assert = assert_rm,
#ifdef DDB
        .lc_ddb_show = db_show_rm,
#endif
        .lc_lock = lock_rm,
        .lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_rm,
#endif
};
   99 
/* Lock class for sleepable rm locks (backed by an sx lock). */
struct lock_class lock_class_rm_sleepable = {
        .lc_name = "sleepable rm",
        .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
        .lc_assert = assert_rm,
#ifdef DDB
        .lc_ddb_show = db_show_rm,
#endif
        .lc_lock = lock_rm,
        .lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_rm,
#endif
};
  113 
/*
 * lock_class assertion hook: forward to rm_assert().
 */
static void
assert_rm(struct lock_object *lock, int what)
{

        rm_assert((struct rmlock *)lock, what);
}
  120 
/*
 * These do not support read locks because it would be hard to make
 * the tracker work correctly with the current lock_class API as you
 * would need to have the tracker pointer available when calling
 * rm_rlock() in lock_rm().
 */
static void
lock_rm(struct lock_object *lock, int how)
{
        struct rmlock *rm;

        rm = (struct rmlock *)lock;
        /* Non-zero 'how' requests the exclusive (write) lock. */
        if (how)
                rm_wlock(rm);
#ifdef INVARIANTS
        /* Read-mode requests are unsupported; see comment above. */
        else
                panic("lock_rm called in read mode");
#endif
}
  140 
/*
 * lock_class unlock hook: drop the write lock.  Returns 1, the number
 * of lock levels released (write locks do not recurse here).
 */
static int
unlock_rm(struct lock_object *lock)
{
        struct rmlock *rm;

        rm = (struct rmlock *)lock;
        rm_wunlock(rm);
        return (1);
}
  150 
#ifdef KDTRACE_HOOKS
/*
 * lock_class ownership hook: delegate the query to the lock class of
 * the backing write lock (mutex or sx).
 */
static int
owner_rm(struct lock_object *lock, struct thread **owner)
{
        struct rmlock *rm;
        struct lock_class *lc;

        rm = (struct rmlock *)lock;
        lc = LOCK_CLASS(&rm->rm_wlock_object);
        return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
#endif
  163 
  164 static struct mtx rm_spinlock;
  165 
  166 MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);
  167 
/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
static void inline
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
        struct rm_queue *next;

        /* Initialize all tracker pointers */
        tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
        next = pc->pc_rm_queue.rmq_next;
        tracker->rmp_cpuQueue.rmq_next = next;

        /* rmq_prev is not used during forward traversal. */
        next->rmq_prev = &tracker->rmp_cpuQueue;

        /*
         * Update pointer to first element.  This store is what makes the
         * tracker visible to a forward traversal, so it is done last.
         */
        pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}
  190 
  191 /*
  192  * Return a count of the number of trackers the thread 'td' already
  193  * has on this CPU for the lock 'rm'.
  194  */
  195 static int
  196 rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
  197     const struct thread *td)
  198 {
  199         struct rm_queue *queue;
  200         struct rm_priotracker *tracker;
  201         int count;
  202 
  203         count = 0;
  204         for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
  205             queue = queue->rmq_next) {
  206                 tracker = (struct rm_priotracker *)queue;
  207                 if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
  208                         count++;
  209         }
  210         return (count);
  211 }
  212 
/*
 * Unlink a tracker from the per-cpu list.  Safe against a concurrent
 * forward traversal from an interrupt on this CPU: the single store to
 * prev->rmq_next atomically removes the element from the forward chain.
 */
static void inline
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
        struct rm_queue *next, *prev;

        next = tracker->rmp_cpuQueue.rmq_next;
        prev = tracker->rmp_cpuQueue.rmq_prev;

        /* Not used during forward traversal. */
        next->rmq_prev = prev;

        /* Remove from list. */
        prev->rmq_next = next;
}
  227 
/*
 * Rendezvous/IPI handler run on each CPU holding a read token when a
 * writer acquires the lock.  Walks the local per-cpu tracker list and
 * flags every active reader of 'rm' as RMPF_ONQUEUE, linking it onto
 * rm_activeReaders so the writer can wait for (and be signalled by)
 * those readers.
 */
static void
rm_cleanIPI(void *arg)
{
        struct pcpu *pc;
        struct rmlock *rm = arg;
        struct rm_priotracker *tracker;
        struct rm_queue *queue;
        pc = pcpu_find(curcpu);

        for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
            queue = queue->rmq_next) {
                tracker = (struct rm_priotracker *)queue;
                /* Only flag trackers for this lock that are not already queued. */
                if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
                        tracker->rmp_flags = RMPF_ONQUEUE;
                        mtx_lock_spin(&rm_spinlock);
                        LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
                            rmp_qentry);
                        mtx_unlock_spin(&rm_spinlock);
                }
        }
}
  249 
  250 void
  251 rm_init_flags(struct rmlock *rm, const char *name, int opts)
  252 {
  253         struct lock_class *lc;
  254         int liflags;
  255 
  256         liflags = 0;
  257         if (!(opts & RM_NOWITNESS))
  258                 liflags |= LO_WITNESS;
  259         if (opts & RM_RECURSE)
  260                 liflags |= LO_RECURSABLE;
  261         rm->rm_writecpus = all_cpus;
  262         LIST_INIT(&rm->rm_activeReaders);
  263         if (opts & RM_SLEEPABLE) {
  264                 liflags |= LO_SLEEPABLE;
  265                 lc = &lock_class_rm_sleepable;
  266                 sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_NOWITNESS);
  267         } else {
  268                 lc = &lock_class_rm;
  269                 mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", MTX_NOWITNESS);
  270         }
  271         lock_init(&rm->lock_object, lc, name, NULL, liflags);
  272 }
  273 
/*
 * Initialize an rm lock with default options.
 */
void
rm_init(struct rmlock *rm, const char *name)
{

        rm_init_flags(rm, name, 0);
}
  280 
/*
 * Destroy an rm lock.  The lock must be unlocked.  The RM_DESTROYED
 * cookie is stored in the active-readers list head so later misuse can
 * be detected by rm_destroyed().
 */
void
rm_destroy(struct rmlock *rm)
{

        rm_assert(rm, RA_UNLOCKED);
        LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                sx_destroy(&rm->rm_lock_sx);
        else
                mtx_destroy(&rm->rm_lock_mtx);
        lock_destroy(&rm->lock_object);
}
  293 
  294 int
  295 rm_wowned(struct rmlock *rm)
  296 {
  297 
  298         if (rm->lock_object.lo_flags & LO_SLEEPABLE)
  299                 return (sx_xlocked(&rm->rm_lock_sx));
  300         else
  301                 return (mtx_owned(&rm->rm_lock_mtx));
  302 }
  303 
/*
 * SYSINIT helper: initialize an rm lock from a struct rm_args.
 */
void
rm_sysinit(void *arg)
{
        struct rm_args *args = arg;

        rm_init(args->ra_rm, args->ra_desc);
}
  311 
/*
 * SYSINIT helper: initialize an rm lock with options from a
 * struct rm_args_flags.
 */
void
rm_sysinit_flags(void *arg)
{
        struct rm_args_flags *args = arg;

        rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts);
}
  319 
/*
 * Slow path for rm_rlock(): taken when this CPU does not hold a read
 * token for 'rm' (or a preemption is pending).  The tracker has already
 * been placed on the per-cpu list by _rm_rlock().  Returns 1 once the
 * read lock is held, or 0 if 'trylock' was set and the backing write
 * lock could not be acquired.
 */
static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
        struct pcpu *pc;

        critical_enter();
        pc = pcpu_find(curcpu);

        /* Check if we just need to do a proper critical_exit. */
        if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
                critical_exit();
                return (1);
        }

        /* Remove our tracker from the per-cpu list. */
        rm_tracker_remove(pc, tracker);

        /* Check to see if the IPI granted us the lock after all. */
        if (tracker->rmp_flags) {
                /* Just add back tracker - we hold the lock. */
                rm_tracker_add(pc, tracker);
                critical_exit();
                return (1);
        }

        /*
         * We allow readers to acquire a lock even if a writer is blocked if
         * the lock is recursive and the reader already holds the lock.
         */
        if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
                /*
                 * Just grant the lock if this thread already has a tracker
                 * for this lock on the per-cpu queue.
                 */
                if (rm_trackers_present(pc, rm, curthread) != 0) {
                        mtx_lock_spin(&rm_spinlock);
                        LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
                            rmp_qentry);
                        tracker->rmp_flags = RMPF_ONQUEUE;
                        mtx_unlock_spin(&rm_spinlock);
                        rm_tracker_add(pc, tracker);
                        critical_exit();
                        return (1);
                }
        }

        /*
         * Fall back to taking the backing write lock to serialize with
         * any writer, then re-grant this CPU a read token.
         */
        sched_unpin();
        critical_exit();

        if (trylock) {
                if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
                        if (!sx_try_xlock(&rm->rm_lock_sx))
                                return (0);
                } else {
                        if (!mtx_trylock(&rm->rm_lock_mtx))
                                return (0);
                }
        } else {
                if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
                        THREAD_SLEEPING_OK();
                        sx_xlock(&rm->rm_lock_sx);
                        THREAD_NO_SLEEPING();
                } else
                        mtx_lock(&rm->rm_lock_mtx);
        }

        /* Re-acquire a read token for this CPU and reinstall the tracker. */
        critical_enter();
        pc = pcpu_find(curcpu);
        CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
        rm_tracker_add(pc, tracker);
        sched_pin();
        critical_exit();

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                sx_xunlock(&rm->rm_lock_sx);
        else
                mtx_unlock(&rm->rm_lock_mtx);

        return (1);
}
  400 
/*
 * Acquire a read lock on 'rm', recording state in 'tracker'.  The fast
 * path hand-inlines critical_enter()/critical_exit() (td_critnest
 * manipulation plus compiler barriers) to keep the common case cheap.
 * Returns 1 on success, 0 only when 'trylock' fails in the slow path.
 */
int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
        struct thread *td = curthread;
        struct pcpu *pc;

        if (SCHEDULER_STOPPED())
                return (1);

        tracker->rmp_flags  = 0;
        tracker->rmp_thread = td;
        tracker->rmp_rmlock = rm;

        td->td_critnest++;      /* critical_enter(); */

        __compiler_membar();

        pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */

        rm_tracker_add(pc, tracker);

        sched_pin();

        __compiler_membar();

        td->td_critnest--;

        /*
         * Fast path to combine two common conditions into a single
         * conditional jump.
         */
        if (0 == (td->td_owepreempt |
            CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
                return (1);

        /* We do not have a read token and need to acquire one. */
        return _rm_rlock_hard(rm, tracker, trylock);
}
  439 
/*
 * Slow path for read unlock: handle a pending preemption and, if this
 * tracker was flagged by a writer, remove it from rm_activeReaders and
 * signal the writer's turnstile when it was the one being waited on.
 */
static void
_rm_unlock_hard(struct thread *td,struct rm_priotracker *tracker)
{

        if (td->td_owepreempt) {
                /* Re-enter a critical section so critical_exit() preempts. */
                td->td_critnest++;
                critical_exit();
        }

        if (!tracker->rmp_flags)
                return;

        mtx_lock_spin(&rm_spinlock);
        LIST_REMOVE(tracker, rmp_qentry);

        if (tracker->rmp_flags & RMPF_SIGNAL) {
                struct rmlock *rm;
                struct turnstile *ts;

                rm = tracker->rmp_rmlock;

                /* Lock the turnstile chain before dropping rm_spinlock. */
                turnstile_chain_lock(&rm->lock_object);
                mtx_unlock_spin(&rm_spinlock);

                ts = turnstile_lookup(&rm->lock_object);

                turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
                turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
                turnstile_chain_unlock(&rm->lock_object);
        } else
                mtx_unlock_spin(&rm_spinlock);
}
  472 
/*
 * Release a read lock.  Mirrors _rm_rlock(): the critical section is
 * hand-inlined via td_critnest, and the slow path is only taken when a
 * preemption is pending or a writer flagged this tracker.
 */
void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
        struct pcpu *pc;
        struct thread *td = tracker->rmp_thread;

        if (SCHEDULER_STOPPED())
                return;

        td->td_critnest++;      /* critical_enter(); */
        pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
        rm_tracker_remove(pc, tracker);
        td->td_critnest--;
        sched_unpin();

        if (0 == (td->td_owepreempt | tracker->rmp_flags))
                return;

        _rm_unlock_hard(td, tracker);
}
  493 
/*
 * Acquire the write lock: take the backing lock, revoke every CPU's
 * read token, flag active readers via an IPI rendezvous, and then block
 * on a turnstile until each flagged reader has drained.
 */
void
_rm_wlock(struct rmlock *rm)
{
        struct rm_priotracker *prio;
        struct turnstile *ts;
        cpuset_t readcpus;

        if (SCHEDULER_STOPPED())
                return;

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                sx_xlock(&rm->rm_lock_sx);
        else
                mtx_lock(&rm->rm_lock_mtx);

        if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
                /* Get all read tokens back */
                readcpus = all_cpus;
                CPU_NAND(&readcpus, &rm->rm_writecpus);
                rm->rm_writecpus = all_cpus;

                /*
                 * Assumes rm->rm_writecpus update is visible on other CPUs
                 * before rm_cleanIPI is called.
                 */
#ifdef SMP
                smp_rendezvous_cpus(readcpus,
                    smp_no_rendevous_barrier,
                    rm_cleanIPI,
                    smp_no_rendevous_barrier,
                    rm);

#else
                rm_cleanIPI(rm);
#endif

                /* Wait, via the turnstile, for each flagged reader to drain. */
                mtx_lock_spin(&rm_spinlock);
                while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
                        ts = turnstile_trywait(&rm->lock_object);
                        prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
                        mtx_unlock_spin(&rm_spinlock);
                        turnstile_wait(ts, prio->rmp_thread,
                            TS_EXCLUSIVE_QUEUE);
                        mtx_lock_spin(&rm_spinlock);
                }
                mtx_unlock_spin(&rm_spinlock);
        }
}
  542 
/*
 * Release the write lock by dropping the backing sx or mutex.
 */
void
_rm_wunlock(struct rmlock *rm)
{

        if (rm->lock_object.lo_flags & LO_SLEEPABLE)
                sx_xunlock(&rm->rm_lock_sx);
        else
                mtx_unlock(&rm->rm_lock_mtx);
}
  552 
  553 #ifdef LOCK_DEBUG
  554 
/*
 * Debug wrapper for rm_wlock(): sanity checks, WITNESS ordering, lock
 * logging, and per-thread lock accounting around _rm_wlock().
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
            curthread, rm->lock_object.lo_name, file, line));
        KASSERT(!rm_destroyed(rm),
            ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
        _rm_assert(rm, RA_UNLOCKED, file, line);

        WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line, NULL);

        _rm_wlock(rm);

        LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

        WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

        curthread->td_locks++;

}
  581 
/*
 * Debug wrapper for rm_wunlock(): sanity checks, WITNESS/log updates,
 * then _rm_wunlock() and lock accounting.
 */
void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(!rm_destroyed(rm),
            ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
        _rm_assert(rm, RA_WLOCKED, file, line);
        WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
        _rm_wunlock(rm);
        curthread->td_locks--;
}
  597 
/*
 * Debug wrapper for rm_rlock()/rm_try_rlock(): recursion and ownership
 * checks, WITNESS ordering, lock logging, and per-thread accounting
 * around _rm_rlock().  Returns the _rm_rlock() result.
 */
int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return (1);

#ifdef INVARIANTS
        /* Catch recursion on a lock not created with RM_RECURSE. */
        if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
                critical_enter();
                KASSERT(rm_trackers_present(pcpu_find(curcpu), rm,
                    curthread) == 0,
                    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
                    rm->lock_object.lo_name, file, line));
                critical_exit();
        }
#endif
        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
            curthread, rm->lock_object.lo_name, file, line));
        KASSERT(!rm_destroyed(rm),
            ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
        if (!trylock) {
                KASSERT(!rm_wowned(rm),
                    ("rm_rlock: wlock already held for %s @ %s:%d",
                    rm->lock_object.lo_name, file, line));
                WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
                    NULL);
        }

        if (_rm_rlock(rm, tracker, trylock)) {
                if (trylock)
                        LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
                            line);
                else
                        LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
                            line);
                WITNESS_LOCK(&rm->lock_object, 0, file, line);

                curthread->td_locks++;

                return (1);
        } else if (trylock)
                LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

        return (0);
}
  646 
/*
 * Debug wrapper for rm_runlock(): sanity checks, WITNESS/log updates,
 * then _rm_runlock() and lock accounting.
 */
void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(!rm_destroyed(rm),
            ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
        _rm_assert(rm, RA_RLOCKED, file, line);
        WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
        LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
        _rm_runlock(rm, tracker);
        curthread->td_locks--;
}
  663 
  664 #else
  665 
  666 /*
  667  * Just strip out file and line arguments if no lock debugging is enabled in
  668  * the kernel - we are called from a kernel module.
  669  */
/* Pass-through: no lock debugging compiled in. */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

        _rm_wlock(rm);
}

/* Pass-through: no lock debugging compiled in. */
void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

        _rm_wunlock(rm);
}

/* Pass-through: no lock debugging compiled in. */
int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

        return _rm_rlock(rm, tracker, trylock);
}

/* Pass-through: no lock debugging compiled in. */
void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        _rm_runlock(rm, tracker);
}
  699 
  700 #endif
  701 
  702 #ifdef INVARIANT_SUPPORT
  703 #ifndef INVARIANTS
  704 #undef _rm_assert
  705 #endif
  706 
/*
 * Note that this does not need to use witness_assert() for read lock
 * assertions since an exact count of read locks held by this thread
 * is computable.
 */
void
_rm_assert(struct rmlock *rm, int what, const char *file, int line)
{
        int count;

        /* Assertions are meaningless once the system has panicked. */
        if (panicstr != NULL)
                return;
        switch (what) {
        case RA_LOCKED:
        case RA_LOCKED | RA_RECURSED:
        case RA_LOCKED | RA_NOTRECURSED:
        case RA_RLOCKED:
        case RA_RLOCKED | RA_RECURSED:
        case RA_RLOCKED | RA_NOTRECURSED:
                /*
                 * Handle the write-locked case.  Unlike other
                 * primitives, writers can never recurse.
                 */
                if (rm_wowned(rm)) {
                        if (what & RA_RLOCKED)
                                panic("Lock %s exclusively locked @ %s:%d\n",
                                    rm->lock_object.lo_name, file, line);
                        if (what & RA_RECURSED)
                                panic("Lock %s not recursed @ %s:%d\n",
                                    rm->lock_object.lo_name, file, line);
                        break;
                }

                /* Count this thread's read trackers on the local CPU. */
                critical_enter();
                count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
                critical_exit();

                if (count == 0)
                        panic("Lock %s not %slocked @ %s:%d\n",
                            rm->lock_object.lo_name, (what & RA_RLOCKED) ?
                            "read " : "", file, line);
                if (count > 1) {
                        if (what & RA_NOTRECURSED)
                                panic("Lock %s recursed @ %s:%d\n",
                                    rm->lock_object.lo_name, file, line);
                } else if (what & RA_RECURSED)
                        panic("Lock %s not recursed @ %s:%d\n",
                            rm->lock_object.lo_name, file, line);
                break;
        case RA_WLOCKED:
                if (!rm_wowned(rm))
                        panic("Lock %s not exclusively locked @ %s:%d\n",
                            rm->lock_object.lo_name, file, line);
                break;
        case RA_UNLOCKED:
                if (rm_wowned(rm))
                        panic("Lock %s exclusively locked @ %s:%d\n",
                            rm->lock_object.lo_name, file, line);

                /* Any read tracker means the lock is still read-held. */
                critical_enter();
                count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
                critical_exit();

                if (count != 0)
                        panic("Lock %s read locked @ %s:%d\n",
                            rm->lock_object.lo_name, file, line);
                break;
        default:
                panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
                    line);
        }
}
  779 #endif /* INVARIANT_SUPPORT */
  780 
  781 #ifdef DDB
  782 static void
  783 print_tracker(struct rm_priotracker *tr)
  784 {
  785         struct thread *td;
  786 
  787         td = tr->rmp_thread;
  788         db_printf("   thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid,
  789             td->td_proc->p_pid, td->td_name);
  790         if (tr->rmp_flags & RMPF_ONQUEUE) {
  791                 db_printf("ONQUEUE");
  792                 if (tr->rmp_flags & RMPF_SIGNAL)
  793                         db_printf(",SIGNAL");
  794         } else
  795                 db_printf("");
  796         db_printf("}\n");
  797 }
  798 
/*
 * DDB show routine for rm locks: dump the write-cpu set, the per-CPU
 * tracker lists, the active-reader list, and the backing write lock.
 */
static void
db_show_rm(struct lock_object *lock)
{
        struct rm_priotracker *tr;
        struct rm_queue *queue;
        struct rmlock *rm;
        struct lock_class *lc;
        struct pcpu *pc;

        rm = (struct rmlock *)lock;
        db_printf(" writecpus: ");
        ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
        db_printf("\n");
        db_printf(" per-CPU readers:\n");
        STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
                for (queue = pc->pc_rm_queue.rmq_next;
                    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
                        tr = (struct rm_priotracker *)queue;
                        if (tr->rmp_rmlock == rm)
                                print_tracker(tr);
                }
        db_printf(" active readers:\n");
        LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
                print_tracker(tr);
        lc = LOCK_CLASS(&rm->rm_wlock_object);
        db_printf("Backing write-lock (%s):\n", lc->lc_name);
        lc->lc_ddb_show(&rm->rm_wlock_object);
}
  827 #endif

Cache object: 3fb0130d098e9722c43c691c1e4d2fb1


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.