FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_rmlock.c


/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */
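
/*
 * An rmlock is a "read-mostly" lock: the read path is kept cheap by
 * having each reader record itself in a per-cpu tracker queue inside a
 * critical section, so the common-case read lock performs no atomic
 * operations and no writes to shared cache lines.  The write path is
 * correspondingly expensive: the writer takes rm_lock, revokes the read
 * token, IPIs the other cpus to find still-active readers, and sleeps
 * on a turnstile until they drain.
 */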

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#define RMPF_ONQUEUE    1
#define RMPF_SIGNAL     2

/*
 * Supporting the use of rmlocks in condition variables and msleep would
 * require yet another list for the priority tracker; using this lock
 * for cv and msleep also does not seem very useful.
 */

/* Keep the compiler from reordering memory accesses across this point. */
static __inline void
compiler_memory_barrier(void)
{

        __asm __volatile("":::"memory");
}

static void     assert_rm(struct lock_object *lock, int what);
static void     lock_rm(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int      owner_rm(struct lock_object *lock, struct thread **owner);
#endif
static int      unlock_rm(struct lock_object *lock);

struct lock_class lock_class_rm = {
        .lc_name = "rm",
        .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
        .lc_assert = assert_rm,
#if 0
#ifdef DDB
        .lc_ddb_show = db_show_rwlock,
#endif
#endif
        .lc_lock = lock_rm,
        .lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_rm,
#endif
};

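/*
 * An rmlock is never acquired or released through the generic
 * lock-class interface (e.g. on behalf of condition variables or
 * msleep, per the comment above), so the methods below should be
 * unreachable; each one panics if it is ever called.
 */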
static void
assert_rm(struct lock_object *lock, int what)
{

        panic("assert_rm called");
}

static void
lock_rm(struct lock_object *lock, int how)
{

        panic("lock_rm called");
}

static int
unlock_rm(struct lock_object *lock)
{

        panic("unlock_rm called");
}

#ifdef KDTRACE_HOOKS
static int
owner_rm(struct lock_object *lock, struct thread **owner)
{

        panic("owner_rm called");
}
#endif

static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
static inline void
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
        struct rm_queue *next;

        /* Initialize all tracker pointers */
        tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
        next = pc->pc_rm_queue.rmq_next;
        tracker->rmp_cpuQueue.rmq_next = next;

        /* rmq_prev is not used during forward traversal. */
        next->rmq_prev = &tracker->rmp_cpuQueue;

        /* Update pointer to first element. */
        pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}

static inline void
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
        struct rm_queue *next, *prev;

        next = tracker->rmp_cpuQueue.rmq_next;
        prev = tracker->rmp_cpuQueue.rmq_prev;

        /* Not used during forward traversal. */
        next->rmq_prev = prev;

        /* Remove from list. */
        prev->rmq_next = next;
}

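/*
 * Rendezvous action run on each cpu by _rm_wlock(): walk this cpu's
 * tracker queue and move every not-yet-flagged reader of the given
 * rmlock onto the lock's rm_activeReaders list, so the writer can wait
 * for it to finish its read section.
 */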
static void
rm_cleanIPI(void *arg)
{
        struct pcpu *pc;
        struct rmlock *rm = arg;
        struct rm_priotracker *tracker;
        struct rm_queue *queue;

        pc = pcpu_find(curcpu);

        for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
            queue = queue->rmq_next) {
                tracker = (struct rm_priotracker *)queue;
                if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
                        tracker->rmp_flags = RMPF_ONQUEUE;
                        mtx_lock_spin(&rm_spinlock);
                        LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
                            rmp_qentry);
                        mtx_unlock_spin(&rm_spinlock);
                }
        }
}

void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
        int liflags;

        liflags = 0;
        if (!(opts & RM_NOWITNESS))
                liflags |= LO_WITNESS;
        if (opts & RM_RECURSE)
                liflags |= LO_RECURSABLE;
        rm->rm_noreadtoken = 1;
        LIST_INIT(&rm->rm_activeReaders);
        mtx_init(&rm->rm_lock, name, "rmlock_mtx", MTX_NOWITNESS);
        lock_init(&rm->lock_object, &lock_class_rm, name, NULL, liflags);
}

void
rm_init(struct rmlock *rm, const char *name)
{

        rm_init_flags(rm, name, 0);
}

void
rm_destroy(struct rmlock *rm)
{

        mtx_destroy(&rm->rm_lock);
        lock_destroy(&rm->lock_object);
}

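/*
 * Illustrative usage sketch (not part of the original file; the names
 * below are invented for the example).  A reader keeps its priority
 * tracker on the stack for the lifetime of the read lock:
 *
 *      static struct rmlock example_lock;
 *      struct rm_priotracker tracker;
 *
 *      rm_init(&example_lock, "example");
 *      rm_rlock(&example_lock, &tracker);
 *      ... read the shared, rarely-changing data ...
 *      rm_runlock(&example_lock, &tracker);
 *
 * Writers use rm_wlock(&example_lock) / rm_wunlock(&example_lock).
 */
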
int
rm_wowned(struct rmlock *rm)
{

        return (mtx_owned(&rm->rm_lock));
}

void
rm_sysinit(void *arg)
{
        struct rm_args *args = arg;

        rm_init(args->ra_rm, args->ra_desc);
}

void
rm_sysinit_flags(void *arg)
{
        struct rm_args_flags *args = arg;

        rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts);
}

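/*
 * Slow path for read locking, entered from _rm_rlock() when the read
 * token has been revoked by a writer or a preemption is pending.  The
 * caller has already queued the tracker on the per-cpu list.
 */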
static void
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker)
{
        struct pcpu *pc;
        struct rm_queue *queue;
        struct rm_priotracker *atracker;

        critical_enter();
        pc = pcpu_find(curcpu);

        /* Check if we just need to do a proper critical_exit. */
        if (rm->rm_noreadtoken == 0) {
                critical_exit();
                return;
        }

        /* Remove our tracker from the per-cpu list. */
        rm_tracker_remove(pc, tracker);

        /* Check to see if the IPI granted us the lock after all. */
        if (tracker->rmp_flags) {
                /* Just add back tracker - we hold the lock. */
                rm_tracker_add(pc, tracker);
                critical_exit();
                return;
        }

        /*
         * We allow readers to acquire a lock even if a writer is blocked if
         * the lock is recursive and the reader already holds the lock.
         */
        if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
                /*
                 * Just grant the lock if this thread already has a tracker
                 * for this lock on the per-cpu queue.
                 */
                for (queue = pc->pc_rm_queue.rmq_next;
                    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
                        atracker = (struct rm_priotracker *)queue;
                        if ((atracker->rmp_rmlock == rm) &&
                            (atracker->rmp_thread == tracker->rmp_thread)) {
                                mtx_lock_spin(&rm_spinlock);
                                LIST_INSERT_HEAD(&rm->rm_activeReaders,
                                    tracker, rmp_qentry);
                                tracker->rmp_flags = RMPF_ONQUEUE;
                                mtx_unlock_spin(&rm_spinlock);
                                rm_tracker_add(pc, tracker);
                                critical_exit();
                                return;
                        }
                }
        }

        sched_unpin();
        critical_exit();

        mtx_lock(&rm->rm_lock);
        rm->rm_noreadtoken = 0;
        critical_enter();

        pc = pcpu_find(curcpu);
        rm_tracker_add(pc, tracker);
        sched_pin();
        critical_exit();

        mtx_unlock(&rm->rm_lock);
}

void
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
        struct thread *td = curthread;
        struct pcpu *pc;

        if (SCHEDULER_STOPPED())
                return;

        tracker->rmp_flags  = 0;
        tracker->rmp_thread = td;
        tracker->rmp_rmlock = rm;

        td->td_critnest++;      /* critical_enter(); */

        compiler_memory_barrier();

        pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */

        rm_tracker_add(pc, tracker);

        sched_pin();

        compiler_memory_barrier();

        td->td_critnest--;

        /*
         * Fast path to combine two common conditions into a single
         * conditional jump.
         */
        if ((td->td_owepreempt | rm->rm_noreadtoken) == 0)
                return;

        /* We do not have a read token and need to acquire one. */
        _rm_rlock_hard(rm, tracker);
}

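/*
 * Slow path for read unlocking: pay back any preemption deferred while
 * the tracker was being removed, and if rm_cleanIPI() put this tracker
 * on rm_activeReaders, take it off again, waking the writer sleeping on
 * the turnstile if it was told to wait for this reader.
 */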
static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

        if (td->td_owepreempt) {
                td->td_critnest++;
                critical_exit();
        }

        if (!tracker->rmp_flags)
                return;

        mtx_lock_spin(&rm_spinlock);
        LIST_REMOVE(tracker, rmp_qentry);

        if (tracker->rmp_flags & RMPF_SIGNAL) {
                struct rmlock *rm;
                struct turnstile *ts;

                rm = tracker->rmp_rmlock;

                turnstile_chain_lock(&rm->lock_object);
                mtx_unlock_spin(&rm_spinlock);

                ts = turnstile_lookup(&rm->lock_object);

                turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
                turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
                turnstile_chain_unlock(&rm->lock_object);
        } else
                mtx_unlock_spin(&rm_spinlock);
}

void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
        struct pcpu *pc;
        struct thread *td = tracker->rmp_thread;

        if (SCHEDULER_STOPPED())
                return;

        td->td_critnest++;      /* critical_enter(); */
        pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
        rm_tracker_remove(pc, tracker);
        td->td_critnest--;
        sched_unpin();

        if ((td->td_owepreempt | tracker->rmp_flags) == 0)
                return;

        _rm_unlock_hard(td, tracker);
}

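/*
 * Write locking: take the writer mutex, revoke the read token, flush
 * out readers still active on other cpus via an IPI rendezvous, then
 * sleep on the lock's turnstile until every active reader has dropped
 * its read lock.
 */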
void
_rm_wlock(struct rmlock *rm)
{
        struct rm_priotracker *prio;
        struct turnstile *ts;

        if (SCHEDULER_STOPPED())
                return;

        mtx_lock(&rm->rm_lock);

        if (rm->rm_noreadtoken == 0) {
                /* Get all read tokens back. */
                rm->rm_noreadtoken = 1;

                /*
                 * Assumes rm->rm_noreadtoken update is visible on other CPUs
                 * before rm_cleanIPI is called.
                 */
#ifdef SMP
                smp_rendezvous(smp_no_rendevous_barrier,
                    rm_cleanIPI,
                    smp_no_rendevous_barrier,
                    rm);
#else
                rm_cleanIPI(rm);
#endif

                mtx_lock_spin(&rm_spinlock);
                while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
                        ts = turnstile_trywait(&rm->lock_object);
                        prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
                        mtx_unlock_spin(&rm_spinlock);
                        turnstile_wait(ts, prio->rmp_thread,
                            TS_EXCLUSIVE_QUEUE);
                        mtx_lock_spin(&rm_spinlock);
                }
                mtx_unlock_spin(&rm_spinlock);
        }
}

void
_rm_wunlock(struct rmlock *rm)
{

        mtx_unlock(&rm->rm_lock);
}

#ifdef LOCK_DEBUG

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line, NULL);

        _rm_wlock(rm);

        LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

        WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

        curthread->td_locks++;
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        curthread->td_locks--;
        WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
        _rm_wunlock(rm);
}

void
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line, NULL);

        _rm_rlock(rm, tracker);

        LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line);

        WITNESS_LOCK(&rm->lock_object, 0, file, line);

        curthread->td_locks++;
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;

        curthread->td_locks--;
        WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
        LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
        _rm_runlock(rm, tracker);
}


#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

        _rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

        _rm_wunlock(rm);
}

void
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        _rm_rlock(rm, tracker);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

        _rm_runlock(rm, tracker);
}

#endif

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.