FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_rmlock.c

/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */
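
/*
 * Example usage, as an illustrative sketch following the rmlock(9)
 * interface; the names "cfg_lock" and "cfg" and the reader/writer
 * bodies are hypothetical:
 *
 *	static struct rmlock cfg_lock;
 *
 *	rm_init(&cfg_lock, "cfg");
 *
 *	Read side (cheap, may run concurrently on many CPUs; the
 *	tracker lives on the caller's stack):
 *		struct rm_priotracker tracker;
 *		rm_rlock(&cfg_lock, &tracker);
 *		... read cfg ...
 *		rm_runlock(&cfg_lock, &tracker);
 *
 *	Write side (expensive; revokes every CPU's read token):
 *		rm_wlock(&cfg_lock);
 *		... modify cfg ...
 *		rm_wunlock(&cfg_lock);
 */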

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.1/sys/kern/kern_rmlock.c 235404 2012-05-13 17:01:32Z avg $");

#include "opt_ddb.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#define RMPF_ONQUEUE    1
#define RMPF_SIGNAL     2

/*
 * To support usage of rmlock in CVs and msleep, yet another list for the
 * priority tracker would be needed.  Using this lock for cv and msleep
 * also does not seem very useful.
 */
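
/*
 * An empty asm with a "memory" clobber: it emits no instructions but
 * prevents the compiler from reordering memory accesses across it.  This
 * is not a hardware barrier; the read fast path only needs ordering with
 * respect to interrupts on the local CPU, for which compiler ordering is
 * sufficient.
 */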
static __inline void
compiler_memory_barrier(void)
{
        __asm __volatile("":::"memory");
}

static void     assert_rm(struct lock_object *lock, int what);
static void     lock_rm(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int      owner_rm(struct lock_object *lock, struct thread **owner);
#endif
static int      unlock_rm(struct lock_object *lock);
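
/*
 * The lock class methods below simply panic: an rmlock is never acquired
 * or released through the generic lc_lock/lc_unlock path (used, for
 * example, by the sleep primitives), so reaching any of them indicates a
 * bug.
 */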
struct lock_class lock_class_rm = {
        .lc_name = "rm",
        .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
        .lc_assert = assert_rm,
#if 0
#ifdef DDB
        .lc_ddb_show = db_show_rwlock,
#endif
#endif
        .lc_lock = lock_rm,
        .lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_rm,
#endif
};

static void
assert_rm(struct lock_object *lock, int what)
{

        panic("assert_rm called");
}

static void
lock_rm(struct lock_object *lock, int how)
{

        panic("lock_rm called");
}

static int
unlock_rm(struct lock_object *lock)
{

        panic("unlock_rm called");
}

#ifdef KDTRACE_HOOKS
static int
owner_rm(struct lock_object *lock, struct thread **owner)
{

        panic("owner_rm called");
}
#endif
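
/*
 * rm_spinlock protects every rmlock's rm_activeReaders list and the
 * rmp_flags of the trackers linked onto those lists.
 */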
static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
static inline void
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
        struct rm_queue *next;

        /* Initialize all tracker pointers */
        tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
        next = pc->pc_rm_queue.rmq_next;
        tracker->rmp_cpuQueue.rmq_next = next;

        /* rmq_prev is not used during forward traversal. */
        next->rmq_prev = &tracker->rmp_cpuQueue;

        /* Update pointer to first element. */
        pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}

static inline void
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
        struct rm_queue *next, *prev;

        next = tracker->rmp_cpuQueue.rmq_next;
        prev = tracker->rmp_cpuQueue.rmq_prev;

        /* Not used during forward traversal. */
        next->rmq_prev = prev;

        /* Remove from list. */
        prev->rmq_next = next;
}
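
/*
 * IPI handler, run on each CPU whose read token a writer is revoking.
 * Any tracker on the local per-cpu list that belongs to the contended
 * rmlock is flagged and moved onto rm_activeReaders, so the writer can
 * later wait for each of those readers to drain.
 */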
static void
rm_cleanIPI(void *arg)
{
        struct pcpu *pc;
        struct rmlock *rm = arg;
        struct rm_priotracker *tracker;
        struct rm_queue *queue;
        pc = pcpu_find(curcpu);

        for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
            queue = queue->rmq_next) {
                tracker = (struct rm_priotracker *)queue;
                if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
                        tracker->rmp_flags = RMPF_ONQUEUE;
                        mtx_lock_spin(&rm_spinlock);
                        LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
                            rmp_qentry);
                        mtx_unlock_spin(&rm_spinlock);
                }
        }
}
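
/* RM_SLEEPABLE is stored in lo_flags, so it must fit in LO_CLASSFLAGS. */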
CTASSERT((RM_SLEEPABLE & LO_CLASSFLAGS) == RM_SLEEPABLE);

void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
        int liflags;

        liflags = 0;
        if (!(opts & RM_NOWITNESS))
                liflags |= LO_WITNESS;
        if (opts & RM_RECURSE)
                liflags |= LO_RECURSABLE;
        rm->rm_writecpus = all_cpus;
        LIST_INIT(&rm->rm_activeReaders);
        if (opts & RM_SLEEPABLE) {
                liflags |= RM_SLEEPABLE;
                sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_RECURSE);
        } else
                mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", MTX_NOWITNESS);
        lock_init(&rm->lock_object, &lock_class_rm, name, NULL, liflags);
}

void
rm_init(struct rmlock *rm, const char *name)
{

        rm_init_flags(rm, name, 0);
}

void
rm_destroy(struct rmlock *rm)
{

        if (rm->lock_object.lo_flags & RM_SLEEPABLE)
                sx_destroy(&rm->rm_lock_sx);
        else
                mtx_destroy(&rm->rm_lock_mtx);
        lock_destroy(&rm->lock_object);
}

int
rm_wowned(struct rmlock *rm)
{

        if (rm->lock_object.lo_flags & RM_SLEEPABLE)
                return (sx_xlocked(&rm->rm_lock_sx));
        else
                return (mtx_owned(&rm->rm_lock_mtx));
}

void
rm_sysinit(void *arg)
{
        struct rm_args *args = arg;

        rm_init(args->ra_rm, args->ra_desc);
}

void
rm_sysinit_flags(void *arg)
{
        struct rm_args_flags *args = arg;

        rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts);
}
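
/*
 * Read-lock slow path, entered when this CPU holds no read token for the
 * lock (a writer has revoked it).  In the worst case the reader falls
 * back to taking the internal mutex or sx lock, which serializes it
 * against the writer, then reclaims this CPU's read token before
 * dropping that lock again.
 */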
static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
        struct pcpu *pc;
        struct rm_queue *queue;
        struct rm_priotracker *atracker;

        critical_enter();
        pc = pcpu_find(curcpu);

        /* Check if we just need to do a proper critical_exit. */
        if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
                critical_exit();
                return (1);
        }

        /* Remove our tracker from the per-cpu list. */
        rm_tracker_remove(pc, tracker);

        /* Check to see if the IPI granted us the lock after all. */
        if (tracker->rmp_flags) {
                /* Just add back tracker - we hold the lock. */
                rm_tracker_add(pc, tracker);
                critical_exit();
                return (1);
        }

        /*
         * We allow readers to acquire a lock even if a writer is blocked if
         * the lock is recursive and the reader already holds the lock.
         */
        if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
                /*
                 * Just grant the lock if this thread already has a tracker
                 * for this lock on the per-cpu queue.
                 */
                for (queue = pc->pc_rm_queue.rmq_next;
                    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
                        atracker = (struct rm_priotracker *)queue;
                        if ((atracker->rmp_rmlock == rm) &&
                            (atracker->rmp_thread == tracker->rmp_thread)) {
                                mtx_lock_spin(&rm_spinlock);
                                LIST_INSERT_HEAD(&rm->rm_activeReaders,
                                    tracker, rmp_qentry);
                                tracker->rmp_flags = RMPF_ONQUEUE;
                                mtx_unlock_spin(&rm_spinlock);
                                rm_tracker_add(pc, tracker);
                                critical_exit();
                                return (1);
                        }
                }
        }

        sched_unpin();
        critical_exit();

        if (trylock) {
                if (rm->lock_object.lo_flags & RM_SLEEPABLE) {
                        if (!sx_try_xlock(&rm->rm_lock_sx))
                                return (0);
                } else {
                        if (!mtx_trylock(&rm->rm_lock_mtx))
                                return (0);
                }
        } else {
                if (rm->lock_object.lo_flags & RM_SLEEPABLE)
                        sx_xlock(&rm->rm_lock_sx);
                else
                        mtx_lock(&rm->rm_lock_mtx);
        }

        critical_enter();
        pc = pcpu_find(curcpu);
        CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
        rm_tracker_add(pc, tracker);
        sched_pin();
        critical_exit();

        if (rm->lock_object.lo_flags & RM_SLEEPABLE)
                sx_xunlock(&rm->rm_lock_sx);
        else
                mtx_unlock(&rm->rm_lock_mtx);

        return (1);
}
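
/*
 * Read-lock fast path.  td_critnest is bumped directly instead of calling
 * critical_enter() to keep the common case cheap; the compiler barriers
 * keep the tracker-list update inside that critical window with respect
 * to interrupts on the local CPU.
 */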
int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
        struct thread *td = curthread;
        struct pcpu *pc;

        if (SCHEDULER_STOPPED())
                return (1);

        tracker->rmp_flags  = 0;
        tracker->rmp_thread = td;
        tracker->rmp_rmlock = rm;

        td->td_critnest++;      /* critical_enter(); */

        compiler_memory_barrier();

        pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */

        rm_tracker_add(pc, tracker);

        sched_pin();

        compiler_memory_barrier();

        td->td_critnest--;

        /*
         * Fast path to combine two common conditions into a single
         * conditional jump.
         */
        if (0 == (td->td_owepreempt |
            CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
                return (1);

        /* We do not have a read token and need to acquire one. */
        return _rm_rlock_hard(rm, tracker, trylock);
}
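
/*
 * Read-unlock slow path: pay any preemption that was deferred while the
 * tracker was being unlinked and, if a writer flagged this reader via
 * the IPI, take the tracker off rm_activeReaders and wake the writer
 * sleeping on the turnstile if this reader was signaled.
 */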
static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

        if (td->td_owepreempt) {
                td->td_critnest++;
                critical_exit();
        }

        if (!tracker->rmp_flags)
                return;

        mtx_lock_spin(&rm_spinlock);
        LIST_REMOVE(tracker, rmp_qentry);

        if (tracker->rmp_flags & RMPF_SIGNAL) {
                struct rmlock *rm;
                struct turnstile *ts;

                rm = tracker->rmp_rmlock;

                turnstile_chain_lock(&rm->lock_object);
                mtx_unlock_spin(&rm_spinlock);

                ts = turnstile_lookup(&rm->lock_object);

                turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
                turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
                turnstile_chain_unlock(&rm->lock_object);
        } else
                mtx_unlock_spin(&rm_spinlock);
}

void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
        struct pcpu *pc;
        struct thread *td = tracker->rmp_thread;

        if (SCHEDULER_STOPPED())
                return;

        td->td_critnest++;      /* critical_enter(); */
        pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
        rm_tracker_remove(pc, tracker);
        td->td_critnest--;
        sched_unpin();

        if (0 == (td->td_owepreempt | tracker->rmp_flags))
                return;

        _rm_unlock_hard(td, tracker);
}
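
/*
 * Write path: acquire the internal mutex or sx lock, revoke every CPU's
 * read token by resetting rm_writecpus to all_cpus, IPI the CPUs that
 * held tokens so active readers register themselves, and then sleep on
 * the turnstile until each of those readers has unlocked.
 */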
void
_rm_wlock(struct rmlock *rm)
{
        struct rm_priotracker *prio;
        struct turnstile *ts;
        cpuset_t readcpus;

        if (SCHEDULER_STOPPED())
                return;

        if (rm->lock_object.lo_flags & RM_SLEEPABLE)
                sx_xlock(&rm->rm_lock_sx);
        else
                mtx_lock(&rm->rm_lock_mtx);

        if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
                /* Get all read tokens back */
                readcpus = all_cpus;
                CPU_NAND(&readcpus, &rm->rm_writecpus);
                rm->rm_writecpus = all_cpus;

                /*
                 * Assumes rm->rm_writecpus update is visible on other CPUs
                 * before rm_cleanIPI is called.
                 */
#ifdef SMP
                smp_rendezvous_cpus(readcpus,
                    smp_no_rendevous_barrier,
                    rm_cleanIPI,
                    smp_no_rendevous_barrier,
                    rm);

#else
                rm_cleanIPI(rm);
#endif

                mtx_lock_spin(&rm_spinlock);
                while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
                        ts = turnstile_trywait(&rm->lock_object);
                        prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
                        mtx_unlock_spin(&rm_spinlock);
                        turnstile_wait(ts, prio->rmp_thread,
                            TS_EXCLUSIVE_QUEUE);
                        mtx_lock_spin(&rm_spinlock);
                }
                mtx_unlock_spin(&rm_spinlock);
        }
}

void
_rm_wunlock(struct rmlock *rm)
{

        if (rm->lock_object.lo_flags & RM_SLEEPABLE)
                sx_xunlock(&rm->rm_lock_sx);
        else
                mtx_unlock(&rm->rm_lock_mtx);
}

#ifdef LOCK_DEBUG

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line, NULL);

        _rm_wlock(rm);

        LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

        if (rm->lock_object.lo_flags & RM_SLEEPABLE)
                WITNESS_LOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
                    file, line);
        else
                WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

        curthread->td_locks++;
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        curthread->td_locks--;
        if (rm->lock_object.lo_flags & RM_SLEEPABLE)
                WITNESS_UNLOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
                    file, line);
        else
                WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
        _rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return (1);

        if (!trylock && (rm->lock_object.lo_flags & RM_SLEEPABLE))
                WITNESS_CHECKORDER(&rm->rm_lock_sx.lock_object, LOP_NEWORDER,
                    file, line, NULL);
        WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line, NULL);

        if (_rm_rlock(rm, tracker, trylock)) {
                LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line);

                WITNESS_LOCK(&rm->lock_object, 0, file, line);

                curthread->td_locks++;

                return (1);
        }

        return (0);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        curthread->td_locks--;
        WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
        LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
        _rm_runlock(rm, tracker);
}

#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

        _rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

        _rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

        return _rm_rlock(rm, tracker, trylock);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        _rm_runlock(rm, tracker);
}

#endif
