FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_mutex.c


    1 /*      $NetBSD: kern_mutex.c,v 1.102 2023/01/27 09:28:41 ozaki-r Exp $ */
    2 
    3 /*-
    4  * Copyright (c) 2002, 2006, 2007, 2008, 2019 The NetBSD Foundation, Inc.
    5  * All rights reserved.
    6  *
    7  * This code is derived from software contributed to The NetBSD Foundation
    8  * by Jason R. Thorpe and Andrew Doran.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   29  * POSSIBILITY OF SUCH DAMAGE.
   30  */
   31 
   32 /*
   33  * Kernel mutex implementation, modeled after those found in Solaris,
   34  * a description of which can be found in:
   35  *
   36  *      Solaris Internals: Core Kernel Architecture, Jim Mauro and
   37  *          Richard McDougall.
   38  */
   39 
   40 #define __MUTEX_PRIVATE
   41 
   42 #include <sys/cdefs.h>
   43 __KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.102 2023/01/27 09:28:41 ozaki-r Exp $");
   44 
   45 #include <sys/param.h>
   46 #include <sys/atomic.h>
   47 #include <sys/proc.h>
   48 #include <sys/mutex.h>
   49 #include <sys/sched.h>
   50 #include <sys/sleepq.h>
   51 #include <sys/systm.h>
   52 #include <sys/lockdebug.h>
   53 #include <sys/kernel.h>
   54 #include <sys/intr.h>
   55 #include <sys/lock.h>
   56 #include <sys/types.h>
   57 #include <sys/cpu.h>
   58 #include <sys/pserialize.h>
   59 
   60 #include <dev/lockstat.h>
   61 
   62 #include <machine/lock.h>
   63 
   64 /*
   65  * When not running a debug kernel, spin mutexes are not much
   66  * more than an splraiseipl() and splx() pair.
   67  */
   68 
   69 #if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
   70 #define FULL
   71 #endif
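
To make the comment above concrete: when none of DIAGNOSTIC, MULTIPROCESSOR or
LOCKDEBUG is defined, FULL stays undefined and a spin mutex degenerates to an
SPL raise on entry and an SPL restore on exit, with no lock word involved at
all.  A minimal illustrative sketch (the sketch_* helpers are not part of this
file):

static inline int
sketch_spin_enter(kmutex_t *mtx)
{
        /* Raise to the IPL recorded at mutex_init() time and hand the
         * previous level back to the caller; nothing else to do. */
        return splraiseipl(MUTEX_SPIN_IPL(mtx));
}

static inline void
sketch_spin_exit(int s)
{
        /* Drop back to the interrupt priority level saved on entry. */
        splx(s);
}

The real macros below stash the old level in the per-CPU ci_mtx_oldspl field
instead of returning it, so that mutex_spin_exit() needs no extra argument.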
   72 
   73 /*
   74  * Debugging support.
   75  */
   76 
   77 #define MUTEX_WANTLOCK(mtx)                                     \
   78     LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),               \
   79         (uintptr_t)__builtin_return_address(0), 0)
   80 #define MUTEX_TESTLOCK(mtx)                                     \
   81     LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),               \
   82         (uintptr_t)__builtin_return_address(0), -1)
   83 #define MUTEX_LOCKED(mtx)                                       \
   84     LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx), NULL,           \
   85         (uintptr_t)__builtin_return_address(0), 0)
   86 #define MUTEX_UNLOCKED(mtx)                                     \
   87     LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx),               \
   88         (uintptr_t)__builtin_return_address(0), 0)
   89 #define MUTEX_ABORT(mtx, msg)                                   \
   90     mutex_abort(__func__, __LINE__, mtx, msg)
   91 
   92 #if defined(LOCKDEBUG)
   93 
   94 #define MUTEX_DASSERT(mtx, cond)                                \
   95 do {                                                            \
   96         if (__predict_false(!(cond)))                           \
   97                 MUTEX_ABORT(mtx, "assertion failed: " #cond);   \
   98 } while (/* CONSTCOND */ 0)
   99 
  100 #else   /* LOCKDEBUG */
  101 
  102 #define MUTEX_DASSERT(mtx, cond)        /* nothing */
  103 
  104 #endif /* LOCKDEBUG */
  105 
  106 #if defined(DIAGNOSTIC)
  107 
  108 #define MUTEX_ASSERT(mtx, cond)                                 \
  109 do {                                                            \
  110         if (__predict_false(!(cond)))                           \
  111                 MUTEX_ABORT(mtx, "assertion failed: " #cond);   \
  112 } while (/* CONSTCOND */ 0)
  113 
  114 #else   /* DIAGNOSTIC */
  115 
  116 #define MUTEX_ASSERT(mtx, cond) /* nothing */
  117 
  118 #endif  /* DIAGNOSTIC */
  119 
  120 /*
  121  * Some architectures can't use __cpu_simple_lock as-is, so allow a way
  122  * for them to use an alternate definition.
  123  */
  124 #ifndef MUTEX_SPINBIT_LOCK_INIT
  125 #define MUTEX_SPINBIT_LOCK_INIT(mtx)    __cpu_simple_lock_init(&(mtx)->mtx_lock)
  126 #endif
  127 #ifndef MUTEX_SPINBIT_LOCKED_P
  128 #define MUTEX_SPINBIT_LOCKED_P(mtx)     __SIMPLELOCK_LOCKED_P(&(mtx)->mtx_lock)
  129 #endif
  130 #ifndef MUTEX_SPINBIT_LOCK_TRY
  131 #define MUTEX_SPINBIT_LOCK_TRY(mtx)     __cpu_simple_lock_try(&(mtx)->mtx_lock)
  132 #endif
  133 #ifndef MUTEX_SPINBIT_LOCK_UNLOCK
  134 #define MUTEX_SPINBIT_LOCK_UNLOCK(mtx)  __cpu_simple_unlock(&(mtx)->mtx_lock)
  135 #endif
  136 
  137 #ifndef MUTEX_INITIALIZE_SPIN_IPL
  138 #define MUTEX_INITIALIZE_SPIN_IPL(mtx, ipl) \
  139                                         ((mtx)->mtx_ipl = makeiplcookie((ipl)))
  140 #endif
  141 
  142 /*
  143  * Spin mutex SPL save / restore.
  144  */
  145 
  146 #define MUTEX_SPIN_SPLRAISE(mtx)                                        \
  147 do {                                                                    \
  148         const int s = splraiseipl(MUTEX_SPIN_IPL(mtx));                 \
  149         struct cpu_info * const x__ci = curcpu();                       \
  150         const int x__cnt = x__ci->ci_mtx_count--;                       \
  151         __insn_barrier();                                               \
  152         if (x__cnt == 0)                                                \
  153                 x__ci->ci_mtx_oldspl = s;                               \
  154 } while (/* CONSTCOND */ 0)
  155 
  156 #define MUTEX_SPIN_SPLRESTORE(mtx)                                      \
  157 do {                                                                    \
  158         struct cpu_info * const x__ci = curcpu();                       \
  159         const int s = x__ci->ci_mtx_oldspl;                             \
  160         __insn_barrier();                                               \
  161         if (++(x__ci->ci_mtx_count) == 0)                               \
  162                 splx(s);                                                \
  163 } while (/* CONSTCOND */ 0)
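
The ci_mtx_count bookkeeping above means that only the outermost spin mutex
taken on a CPU saves the pre-existing SPL, and only the matching outermost
release restores it; nested acquisitions merely raise the SPL further.  A
hedged usage sketch (the locks and their IPLs are hypothetical):

        /* Assume lock_a was initialized at IPL_VM, lock_b at IPL_SCHED,
         * and the CPU is currently running at IPL_NONE. */
        mutex_spin_enter(&lock_a);   /* IPL_NONE -> IPL_VM, old SPL saved  */
        mutex_spin_enter(&lock_b);   /* IPL_VM -> IPL_SCHED, nothing saved */
        mutex_spin_exit(&lock_b);    /* SPL unchanged, count not yet zero  */
        mutex_spin_exit(&lock_a);    /* outermost exit: splx() to IPL_NONE */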
  164 
  165 /*
  166  * Memory barriers.
  167  */
  168 #ifdef __HAVE_ATOMIC_AS_MEMBAR
  169 #define MUTEX_MEMBAR_ENTER()
  170 #define MUTEX_MEMBAR_ACQUIRE()
  171 #define MUTEX_MEMBAR_RELEASE()
  172 #else
  173 #define MUTEX_MEMBAR_ENTER()            membar_enter()
  174 #define MUTEX_MEMBAR_ACQUIRE()          membar_acquire()
  175 #define MUTEX_MEMBAR_RELEASE()          membar_release()
  176 #endif
  177 
  178 /*
  179  * Architectures that provide 'simple' mutexes supply a CAS function
  180  * that is either MP-safe or does not need to be MP-safe.  Adaptive
  181  * mutexes on these architectures do not require an additional
  182  * interlock.
  183  */
  184 
  185 #ifdef __HAVE_SIMPLE_MUTEXES
  186 
  187 #define MUTEX_OWNER(owner)                                              \
  188         (owner & MUTEX_THREAD)
  189 #define MUTEX_HAS_WAITERS(mtx)                                          \
  190         (((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)
  191 
  192 #define MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug)                         \
  193 do {                                                                    \
  194         if (!dodebug)                                                   \
  195                 (mtx)->mtx_owner |= MUTEX_BIT_NODEBUG;                  \
  196 } while (/* CONSTCOND */ 0)
  197 
  198 #define MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl)                        \
  199 do {                                                                    \
  200         (mtx)->mtx_owner = MUTEX_BIT_SPIN;                              \
  201         if (!dodebug)                                                   \
  202                 (mtx)->mtx_owner |= MUTEX_BIT_NODEBUG;                  \
  203         MUTEX_INITIALIZE_SPIN_IPL((mtx), (ipl));                        \
  204         MUTEX_SPINBIT_LOCK_INIT((mtx));                                 \
  205 } while (/* CONSTCOND */ 0)
  206 
  207 #define MUTEX_DESTROY(mtx)                                              \
  208 do {                                                                    \
  209         (mtx)->mtx_owner = MUTEX_THREAD;                                \
  210 } while (/* CONSTCOND */ 0)
  211 
  212 #define MUTEX_SPIN_P(owner)             \
  213     (((owner) & MUTEX_BIT_SPIN) != 0)
  214 #define MUTEX_ADAPTIVE_P(owner)         \
  215     (((owner) & MUTEX_BIT_SPIN) == 0)
  216 
  217 #ifndef MUTEX_CAS
  218 #define MUTEX_CAS(p, o, n)              \
  219         (atomic_cas_ulong((volatile unsigned long *)(p), (o), (n)) == (o))
  220 #endif /* MUTEX_CAS */
  221 
  222 #define MUTEX_DEBUG_P(mtx)      (((mtx)->mtx_owner & MUTEX_BIT_NODEBUG) == 0)
  223 #if defined(LOCKDEBUG)
  224 #define MUTEX_OWNED(owner)              (((owner) & ~MUTEX_BIT_NODEBUG) != 0)
  225 #define MUTEX_INHERITDEBUG(n, o)        (n) |= (o) & MUTEX_BIT_NODEBUG
  226 #else /* defined(LOCKDEBUG) */
  227 #define MUTEX_OWNED(owner)              ((owner) != 0)
  228 #define MUTEX_INHERITDEBUG(n, o)        /* nothing */
  229 #endif /* defined(LOCKDEBUG) */
  230 
  231 static inline int
  232 MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
  233 {
  234         int rv;
  235         uintptr_t oldown = 0;
  236         uintptr_t newown = curthread;
  237 
  238         MUTEX_INHERITDEBUG(oldown, mtx->mtx_owner);
  239         MUTEX_INHERITDEBUG(newown, oldown);
  240         rv = MUTEX_CAS(&mtx->mtx_owner, oldown, newown);
  241         MUTEX_MEMBAR_ACQUIRE();
  242         return rv;
  243 }
  244 
  245 static inline int
  246 MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
  247 {
  248         int rv;
  249         rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS);
  250         MUTEX_MEMBAR_ENTER();
  251         return rv;
  252 }
  253 
  254 static inline void
  255 MUTEX_RELEASE(kmutex_t *mtx)
  256 {
  257         uintptr_t newown;
  258 
  259         MUTEX_MEMBAR_RELEASE();
  260         newown = 0;
  261         MUTEX_INHERITDEBUG(newown, mtx->mtx_owner);
  262         mtx->mtx_owner = newown;
  263 }
  264 #endif  /* __HAVE_SIMPLE_MUTEXES */
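
In the __HAVE_SIMPLE_MUTEXES case all of an adaptive mutex's state lives in
the single mtx_owner word: the owning LWP pointer in the MUTEX_THREAD bits
plus the WAITERS/SPIN/NODEBUG flag bits.  The self-contained sketch below
uses C11 atomics on a hypothetical flat owner word, rather than the macros
above, to show the shape of the uncontended acquire and release paths:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_BIT_WAITERS      ((uintptr_t)1)  /* stand-in for MUTEX_BIT_WAITERS */

/* Take a free lock: CAS 0 -> curthread with acquire ordering. */
static bool
sketch_acquire(_Atomic uintptr_t *owner, uintptr_t curthread)
{
        uintptr_t expected = 0;         /* free: no owner, no flags */

        return atomic_compare_exchange_strong_explicit(owner, &expected,
            curthread, memory_order_acquire, memory_order_relaxed);
}

/* Fast-path release: succeeds only if we own the lock and no waiters bit is
 * set.  If *owner is (curthread | SKETCH_BIT_WAITERS) the CAS fails and the
 * caller must take a slow path that wakes the sleeping waiters. */
static bool
sketch_release(_Atomic uintptr_t *owner, uintptr_t curthread)
{
        uintptr_t expected = curthread;

        return atomic_compare_exchange_strong_explicit(owner, &expected,
            (uintptr_t)0, memory_order_release, memory_order_relaxed);
}

Note that MUTEX_RELEASE() above deliberately uses a plain store rather than a
CAS; the long comment in mutex_vector_enter() below explains the protocol
that makes that safe.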
  265 
  266 /*
  267  * Patch in stubs via strong alias where they are not available.
  268  */
  269 
  270 #if defined(LOCKDEBUG)
  271 #undef  __HAVE_MUTEX_STUBS
  272 #undef  __HAVE_SPIN_MUTEX_STUBS
  273 #endif
  274 
  275 #ifndef __HAVE_MUTEX_STUBS
  276 __strong_alias(mutex_enter,mutex_vector_enter);
  277 __strong_alias(mutex_exit,mutex_vector_exit);
  278 #endif
  279 
  280 #ifndef __HAVE_SPIN_MUTEX_STUBS
  281 __strong_alias(mutex_spin_enter,mutex_vector_enter);
  282 __strong_alias(mutex_spin_exit,mutex_vector_exit);
  283 #endif
  284 
  285 static void     mutex_abort(const char *, size_t, const kmutex_t *,
  286     const char *);
  287 static void     mutex_dump(const volatile void *, lockop_printer_t);
  288 
  289 lockops_t mutex_spin_lockops = {
  290         .lo_name = "Mutex",
  291         .lo_type = LOCKOPS_SPIN,
  292         .lo_dump = mutex_dump,
  293 };
  294 
  295 lockops_t mutex_adaptive_lockops = {
  296         .lo_name = "Mutex",
  297         .lo_type = LOCKOPS_SLEEP,
  298         .lo_dump = mutex_dump,
  299 };
  300 
  301 syncobj_t mutex_syncobj = {
  302         .sobj_flag      = SOBJ_SLEEPQ_SORTED,
  303         .sobj_unsleep   = turnstile_unsleep,
  304         .sobj_changepri = turnstile_changepri,
  305         .sobj_lendpri   = sleepq_lendpri,
  306         .sobj_owner     = (void *)mutex_owner,
  307 };
  308 
  309 /*
  310  * mutex_dump:
  311  *
  312  *      Dump the contents of a mutex structure.
  313  */
  314 static void
  315 mutex_dump(const volatile void *cookie, lockop_printer_t pr)
  316 {
  317         const volatile kmutex_t *mtx = cookie;
  318         uintptr_t owner = mtx->mtx_owner;
  319 
  320         pr("owner field  : %#018lx wait/spin: %16d/%d\n",
  321             (long)MUTEX_OWNER(owner), MUTEX_HAS_WAITERS(mtx),
  322             MUTEX_SPIN_P(owner));
  323 }
  324 
  325 /*
  326  * mutex_abort:
  327  *
  328  *      Dump information about an error and panic the system.  This
  329  *      generates a lot of machine code in the DIAGNOSTIC case, so
  330  *      we ask the compiler to not inline it.
  331  */
  332 static void __noinline
  333 mutex_abort(const char *func, size_t line, const kmutex_t *mtx, const char *msg)
  334 {
  335 
  336         LOCKDEBUG_ABORT(func, line, mtx, (MUTEX_SPIN_P(mtx->mtx_owner) ?
  337             &mutex_spin_lockops : &mutex_adaptive_lockops), msg);
  338 }
  339 
  340 /*
  341  * mutex_init:
  342  *
  343  *      Initialize a mutex for use.  Note that adaptive mutexes are in
  344  *      essence spin mutexes that can sleep to avoid deadlock and wasting
  345  *      CPU time.  We can't easily provide a type of mutex that always
  346  *      sleeps - see comments in mutex_vector_enter() about releasing
  347  *      mutexes unlocked.
  348  */
  349 void
  350 _mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl,
  351     uintptr_t return_address)
  352 {
  353         lockops_t *lockops __unused;
  354         bool dodebug;
  355 
  356         memset(mtx, 0, sizeof(*mtx));
  357 
  358         if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
  359             ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
  360             ipl == IPL_SOFTSERIAL) {
  361                 lockops = (type == MUTEX_NODEBUG ?
  362                     NULL : &mutex_adaptive_lockops);
  363                 dodebug = LOCKDEBUG_ALLOC(mtx, lockops, return_address);
  364                 MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug);
  365         } else {
  366                 lockops = (type == MUTEX_NODEBUG ?
  367                     NULL : &mutex_spin_lockops);
  368                 dodebug = LOCKDEBUG_ALLOC(mtx, lockops, return_address);
  369                 MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
  370         }
  371 }
  372 
  373 void
  374 mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
  375 {
  376 
  377         _mutex_init(mtx, type, ipl, (uintptr_t)__builtin_return_address(0));
  378 }
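
For reference, consumers of this interface select adaptive versus spin
behaviour purely through the IPL argument: IPL_NONE or a software interrupt
level yields an adaptive mutex, anything higher yields a spin mutex.  A
hedged usage sketch with a hypothetical driver softc:

struct example_softc {
        kmutex_t        sc_lock;        /* adaptive: protects sleepable state   */
        kmutex_t        sc_intr_lock;   /* spin: shared with interrupt handlers */
};

static void
example_attach(struct example_softc *sc)
{

        mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
        mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_VM);
}

static void
example_detach(struct example_softc *sc)
{

        mutex_destroy(&sc->sc_intr_lock);
        mutex_destroy(&sc->sc_lock);
}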
  379 
  380 /*
  381  * mutex_destroy:
  382  *
  383  *      Tear down a mutex.
  384  */
  385 void
  386 mutex_destroy(kmutex_t *mtx)
  387 {
  388         uintptr_t owner = mtx->mtx_owner;
  389 
  390         if (MUTEX_ADAPTIVE_P(owner)) {
  391                 MUTEX_ASSERT(mtx, !MUTEX_OWNED(owner));
  392                 MUTEX_ASSERT(mtx, !MUTEX_HAS_WAITERS(mtx));
  393         } else {
  394                 MUTEX_ASSERT(mtx, !MUTEX_SPINBIT_LOCKED_P(mtx));
  395         }
  396 
  397         LOCKDEBUG_FREE(MUTEX_DEBUG_P(mtx), mtx);
  398         MUTEX_DESTROY(mtx);
  399 }
  400 
  401 #ifdef MULTIPROCESSOR
  402 /*
  403  * mutex_oncpu:
  404  *
  405  *      Return true if an adaptive mutex owner is running on a CPU in the
  406  *      system.  If the target is waiting on the kernel big lock, then we
  407  *      must release it.  This is necessary to avoid deadlock.
  408  */
  409 static bool
  410 mutex_oncpu(uintptr_t owner)
  411 {
  412         struct cpu_info *ci;
  413         lwp_t *l;
  414 
  415         KASSERT(kpreempt_disabled());
  416 
  417         if (!MUTEX_OWNED(owner)) {
  418                 return false;
  419         }
  420 
  421         /*
  422          * See lwp_dtor() for why dereferencing the LWP pointer is safe.
  423          * We must have kernel preemption disabled for that.
  424          */
  425         l = (lwp_t *)MUTEX_OWNER(owner);
  426         ci = l->l_cpu;
  427 
  428         if (ci && ci->ci_curlwp == l) {
  429                 /* Target is running; do we need to block? */
  430                 return (ci->ci_biglock_wanted != l);
  431         }
  432 
  433         /* Not running.  It may be safe to block now. */
  434         return false;
  435 }
  436 #endif  /* MULTIPROCESSOR */
  437 
  438 /*
  439  * mutex_vector_enter:
  440  *
  441  *      Support routine for mutex_enter() that must handle all cases.  In
  442  *      the LOCKDEBUG case, mutex_enter() is always aliased here, even if
  443  *      fast-path stubs are available.  If a mutex_spin_enter() stub is
  444  *      not available, then it is also aliased directly here.
  445  */
  446 void
  447 mutex_vector_enter(kmutex_t *mtx)
  448 {
  449         uintptr_t owner, curthread;
  450         turnstile_t *ts;
  451 #ifdef MULTIPROCESSOR
  452         u_int count;
  453 #endif
  454         LOCKSTAT_COUNTER(spincnt);
  455         LOCKSTAT_COUNTER(slpcnt);
  456         LOCKSTAT_TIMER(spintime);
  457         LOCKSTAT_TIMER(slptime);
  458         LOCKSTAT_FLAG(lsflag);
  459 
  460         /*
  461          * Handle spin mutexes.
  462          */
  463         KPREEMPT_DISABLE(curlwp);
  464         owner = mtx->mtx_owner;
  465         if (MUTEX_SPIN_P(owner)) {
  466 #if defined(LOCKDEBUG) && defined(MULTIPROCESSOR)
  467                 u_int spins = 0;
  468 #endif
  469                 KPREEMPT_ENABLE(curlwp);
  470                 MUTEX_SPIN_SPLRAISE(mtx);
  471                 MUTEX_WANTLOCK(mtx);
  472 #ifdef FULL
  473                 if (MUTEX_SPINBIT_LOCK_TRY(mtx)) {
  474                         MUTEX_LOCKED(mtx);
  475                         return;
  476                 }
  477 #if !defined(MULTIPROCESSOR)
  478                 MUTEX_ABORT(mtx, "locking against myself");
  479 #else /* !MULTIPROCESSOR */
  480 
  481                 LOCKSTAT_ENTER(lsflag);
  482                 LOCKSTAT_START_TIMER(lsflag, spintime);
  483                 count = SPINLOCK_BACKOFF_MIN;
  484 
  485                 /*
  486                  * Spin on the lock word, using exponential backoff
  487                  * to reduce cache line ping-ponging between CPUs.
  488                  */
  489                 do {
  490                         while (MUTEX_SPINBIT_LOCKED_P(mtx)) {
  491                                 SPINLOCK_SPIN_HOOK;
  492                                 SPINLOCK_BACKOFF(count);
  493 #ifdef LOCKDEBUG
  494                                 if (SPINLOCK_SPINOUT(spins))
  495                                         MUTEX_ABORT(mtx, "spinout");
  496 #endif  /* LOCKDEBUG */
  497                         }
  498                 } while (!MUTEX_SPINBIT_LOCK_TRY(mtx));
  499 
  500                 if (count != SPINLOCK_BACKOFF_MIN) {
  501                         LOCKSTAT_STOP_TIMER(lsflag, spintime);
  502                         LOCKSTAT_EVENT(lsflag, mtx,
  503                             LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
  504                 }
  505                 LOCKSTAT_EXIT(lsflag);
  506 #endif  /* !MULTIPROCESSOR */
  507 #endif  /* FULL */
  508                 MUTEX_LOCKED(mtx);
  509                 return;
  510         }
  511 
  512         curthread = (uintptr_t)curlwp;
  513 
  514         MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(owner));
  515         MUTEX_ASSERT(mtx, curthread != 0);
  516         MUTEX_ASSERT(mtx, !cpu_intr_p());
  517         MUTEX_WANTLOCK(mtx);
  518 
  519         if (__predict_true(panicstr == NULL)) {
  520                 KDASSERT(pserialize_not_in_read_section());
  521                 LOCKDEBUG_BARRIER(&kernel_lock, 1);
  522         }
  523 
  524         LOCKSTAT_ENTER(lsflag);
  525 
  526         /*
  527          * Adaptive mutex; spin trying to acquire the mutex.  If we
  528          * determine that the owner is not running on a processor,
  529          * then we stop spinning, and sleep instead.
  530          */
  531         for (;;) {
  532                 if (!MUTEX_OWNED(owner)) {
  533                         /*
  534                          * Mutex owner clear could mean two things:
  535                          *
  536                          *      * The mutex has been released.
  537                          *      * The owner field hasn't been set yet.
  538                          *
  539                          * Try to acquire it again.  If that fails,
  540                          * we'll just loop again.
  541                          */
  542                         if (MUTEX_ACQUIRE(mtx, curthread))
  543                                 break;
  544                         owner = mtx->mtx_owner;
  545                         continue;
  546                 }
  547                 if (__predict_false(MUTEX_OWNER(owner) == curthread)) {
  548                         MUTEX_ABORT(mtx, "locking against myself");
  549                 }
  550 #ifdef MULTIPROCESSOR
  551                 /*
  552                  * Check to see if the owner is running on a processor.
  553                  * If so, then we should just spin, as the owner will
  554                  * likely release the lock very soon.
  555                  */
  556                 if (mutex_oncpu(owner)) {
  557                         LOCKSTAT_START_TIMER(lsflag, spintime);
  558                         count = SPINLOCK_BACKOFF_MIN;
  559                         do {
  560                                 KPREEMPT_ENABLE(curlwp);
  561                                 SPINLOCK_BACKOFF(count);
  562                                 KPREEMPT_DISABLE(curlwp);
  563                                 owner = mtx->mtx_owner;
  564                         } while (mutex_oncpu(owner));
  565                         LOCKSTAT_STOP_TIMER(lsflag, spintime);
  566                         LOCKSTAT_COUNT(spincnt, 1);
  567                         if (!MUTEX_OWNED(owner))
  568                                 continue;
  569                 }
  570 #endif
  571 
  572                 ts = turnstile_lookup(mtx);
  573 
  574                 /*
  575                  * Once we have the turnstile chain interlock, mark the
  576                  * mutex as having waiters.  If that fails, spin again:
  577                  * chances are that the mutex has been released.
  578                  */
  579                 if (!MUTEX_SET_WAITERS(mtx, owner)) {
  580                         turnstile_exit(mtx);
  581                         owner = mtx->mtx_owner;
  582                         continue;
  583                 }
  584 
  585 #ifdef MULTIPROCESSOR
  586                 /*
  587                  * mutex_exit() is permitted to release the mutex without
  588                  * any interlocking instructions, and the following can
  589                  * occur as a result:
  590                  *
  591                  *  CPU 1: MUTEX_SET_WAITERS()      CPU2: mutex_exit()
  592                  * ---------------------------- ----------------------------
  593                  *              ..                  acquire cache line
  594                  *              ..                   test for waiters
  595                  *      acquire cache line    <-      lose cache line
  596                  *       lock cache line                   ..
  597                  *     verify mutex is held                ..
  598                  *          set waiters                    ..
  599                  *       unlock cache line                 ..
  600                  *        lose cache line     ->    acquire cache line
  601                  *              ..                clear lock word, waiters
  602                  *        return success
  603                  *
  604                  * There is another race that can occur: a third CPU could
  605                  * acquire the mutex as soon as it is released.  Since
  606                  * adaptive mutexes are primarily spin mutexes, this is not
  607                  * something that we need to worry about too much.  What we
  608                  * do need to ensure is that the waiters bit gets set.
  609                  *
  610                  * To allow the unlocked release, we need to make some
  611                  * assumptions here:
  612                  *
  613                  * o Release is the only non-atomic/unlocked operation
  614                  *   that can be performed on the mutex.  (It must still
  615                  *   be atomic on the local CPU, e.g. in case interrupted
  616                  *   or preempted).
  617                  *
  618                  * o At any given time, MUTEX_SET_WAITERS() can only ever
  619                  *   be in progress on one CPU in the system - guaranteed
  620                  *   by the turnstile chain lock.
  621                  *
  622                  * o No other operations other than MUTEX_SET_WAITERS()
  623                  *   and release can modify a mutex with a non-zero
  624                  *   owner field.
  625                  *
  626                  * o The result of a successful MUTEX_SET_WAITERS() call
  627                  *   is an unbuffered write that is immediately visible
  628                  *   to all other processors in the system.
  629                  *
  630                  * o If the holding LWP switches away, it posts a store
  631                  *   fence before changing curlwp, ensuring that any
  632                  *   overwrite of the mutex waiters flag by mutex_exit()
  633                  *   completes before the modification of curlwp becomes
  634                  *   visible to this CPU.
  635                  *
  636                  * o cpu_switchto() posts a store fence after setting curlwp
  637                  *   and before resuming execution of an LWP.
  638                  *
  639                  * o _kernel_lock() posts a store fence before setting
  640                  *   curcpu()->ci_biglock_wanted, and after clearing it.
  641                  *   This ensures that any overwrite of the mutex waiters
  642                  *   flag by mutex_exit() completes before the modification
  643                  *   of ci_biglock_wanted becomes visible.
  644                  *
  645                  * We now post a read memory barrier (after setting the
  646                  * waiters field) and check the lock holder's status again.
  647                  * Some of the possible outcomes (not an exhaustive list):
  648                  *
  649                  * 1. The on-CPU check returns true: the holding LWP is
  650                  *    running again.  The lock may be released soon and
  651                  *    we should spin.  Importantly, we can't trust the
  652                  *    value of the waiters flag.
  653                  *
  654                  * 2. The on-CPU check returns false: the holding LWP is
  655                  *    not running.  We now have the opportunity to check
  656                  *    if mutex_exit() has blatted the modifications made
  657                  *    by MUTEX_SET_WAITERS().
  658                  *
  659                  * 3. The on-CPU check returns false: the holding LWP may
  660                  *    or may not be running.  It has context switched at
  661                  *    some point during our check.  Again, we have the
  662                  *    chance to see if the waiters bit is still set or
  663                  *    has been overwritten.
  664                  *
  665                  * 4. The on-CPU check returns false: the holding LWP is
  666                  *    running on a CPU, but wants the big lock.  It's OK
  667                  *    to check the waiters field in this case.
  668                  *
  669                  * 5. The has-waiters check fails: the mutex has been
  670                  *    released, the waiters flag cleared and another LWP
  671                  *    now owns the mutex.
  672                  *
  673                  * 6. The has-waiters check fails: the mutex has been
  674                  *    released.
  675                  *
  676                  * If the waiters bit is not set, it's unsafe to go to sleep,
  677                  * as we might never be awoken.
  678                  */
  679                 membar_consumer();
  680                 if (mutex_oncpu(owner)) {
  681                         turnstile_exit(mtx);
  682                         owner = mtx->mtx_owner;
  683                         continue;
  684                 }
  685                 membar_consumer();
  686                 if (!MUTEX_HAS_WAITERS(mtx)) {
  687                         turnstile_exit(mtx);
  688                         owner = mtx->mtx_owner;
  689                         continue;
  690                 }
  691 #endif  /* MULTIPROCESSOR */
  692 
  693                 LOCKSTAT_START_TIMER(lsflag, slptime);
  694 
  695                 turnstile_block(ts, TS_WRITER_Q, mtx, &mutex_syncobj);
  696 
  697                 LOCKSTAT_STOP_TIMER(lsflag, slptime);
  698                 LOCKSTAT_COUNT(slpcnt, 1);
  699 
  700                 owner = mtx->mtx_owner;
  701         }
  702         KPREEMPT_ENABLE(curlwp);
  703 
  704         LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
  705             slpcnt, slptime);
  706         LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SPIN,
  707             spincnt, spintime);
  708         LOCKSTAT_EXIT(lsflag);
  709 
  710         MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
  711         MUTEX_LOCKED(mtx);
  712 }
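
Stripped of the LOCKSTAT hooks, kernel-preemption bookkeeping and memory
barriers discussed above, the adaptive half of mutex_vector_enter() is
essentially the loop sketched below.  This is a simplified restatement for
orientation only, not a drop-in replacement; the re-checks after
MUTEX_SET_WAITERS() are what implement the protocol described in the long
comment:

static void
sketch_adaptive_enter(kmutex_t *mtx, uintptr_t curthread)
{
        uintptr_t owner;
        turnstile_t *ts;
        u_int count = SPINLOCK_BACKOFF_MIN;

        for (;;) {
                owner = mtx->mtx_owner;
                if (!MUTEX_OWNED(owner)) {
                        /* Free (or owner not yet visible): try to CAS in. */
                        if (MUTEX_ACQUIRE(mtx, curthread))
                                break;
                        continue;
                }
                if (mutex_oncpu(owner)) {
                        /* Owner is running; it will likely release soon. */
                        do {
                                SPINLOCK_BACKOFF(count);
                                owner = mtx->mtx_owner;
                        } while (mutex_oncpu(owner));
                        if (!MUTEX_OWNED(owner))
                                continue;
                }
                /* Owner is off-CPU: set the waiters bit under the turnstile
                 * chain interlock, then re-verify before going to sleep. */
                ts = turnstile_lookup(mtx);
                if (!MUTEX_SET_WAITERS(mtx, owner) ||
                    mutex_oncpu(owner) || !MUTEX_HAS_WAITERS(mtx)) {
                        turnstile_exit(mtx);
                        continue;
                }
                turnstile_block(ts, TS_WRITER_Q, mtx, &mutex_syncobj);
        }
}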
  713 
  714 /*
  715  * mutex_vector_exit:
  716  *
  717  *      Support routine for mutex_exit() that handles all cases.
  718  */
  719 void
  720 mutex_vector_exit(kmutex_t *mtx)
  721 {
  722         turnstile_t *ts;
  723         uintptr_t curthread;
  724 
  725         if (MUTEX_SPIN_P(mtx->mtx_owner)) {
  726 #ifdef FULL
  727                 if (__predict_false(!MUTEX_SPINBIT_LOCKED_P(mtx))) {
  728                         MUTEX_ABORT(mtx, "exiting unheld spin mutex");
  729                 }
  730                 MUTEX_UNLOCKED(mtx);
  731                 MUTEX_SPINBIT_LOCK_UNLOCK(mtx);
  732 #endif
  733                 MUTEX_SPIN_SPLRESTORE(mtx);
  734                 return;
  735         }
  736 
  737 #ifndef __HAVE_MUTEX_STUBS
  738         /*
  739          * On some architectures without mutex stubs, we can enter here to
  740          * release mutexes before interrupts and whatnot are up and running.
  741          * We need this hack to keep them sweet.
  742          */
  743         if (__predict_false(cold)) {
  744                 MUTEX_UNLOCKED(mtx);
  745                 MUTEX_RELEASE(mtx);
  746                 return;
  747         }
  748 #endif
  749 
  750         curthread = (uintptr_t)curlwp;
  751         MUTEX_DASSERT(mtx, curthread != 0);
  752         MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
  753         MUTEX_UNLOCKED(mtx);
  754 #if !defined(LOCKDEBUG)
  755         __USE(curthread);
  756 #endif
  757 
  758 #ifdef LOCKDEBUG
  759         /*
  760          * Avoid having to take the turnstile chain lock every time
  761          * around.  Raise the priority level to splhigh() in order
  762          * to disable preemption and so make the following atomic.
  763          */
  764         {
  765                 int s = splhigh();
  766                 if (!MUTEX_HAS_WAITERS(mtx)) {
  767                         MUTEX_RELEASE(mtx);
  768                         splx(s);
  769                         return;
  770                 }
  771                 splx(s);
  772         }
  773 #endif
  774 
  775         /*
  776          * Get this lock's turnstile.  This gets the interlock on
  777          * the sleep queue.  Once we have that, we can clear the
  778          * lock.  If there was no turnstile for the lock, there
  779          * were no waiters remaining.
  780          */
  781         ts = turnstile_lookup(mtx);
  782 
  783         if (ts == NULL) {
  784                 MUTEX_RELEASE(mtx);
  785                 turnstile_exit(mtx);
  786         } else {
  787                 MUTEX_RELEASE(mtx);
  788                 turnstile_wakeup(ts, TS_WRITER_Q,
  789                     TS_WAITERS(ts, TS_WRITER_Q), NULL);
  790         }
  791 }
  792 
  793 #ifndef __HAVE_SIMPLE_MUTEXES
  794 /*
  795  * mutex_wakeup:
  796  *
  797  *      Support routine for mutex_exit() that wakes up all waiters.
  798  *      We assume that the mutex has been released, but it need not
  799  *      be.
  800  */
  801 void
  802 mutex_wakeup(kmutex_t *mtx)
  803 {
  804         turnstile_t *ts;
  805 
  806         ts = turnstile_lookup(mtx);
  807         if (ts == NULL) {
  808                 turnstile_exit(mtx);
  809                 return;
  810         }
  811         MUTEX_CLEAR_WAITERS(mtx);
  812         turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL);
  813 }
  814 #endif  /* !__HAVE_SIMPLE_MUTEXES */
  815 
  816 /*
  817  * mutex_owned:
  818  *
  819  *      Return true if the current LWP (adaptive) or CPU (spin)
  820  *      holds the mutex.
  821  */
  822 int
  823 mutex_owned(const kmutex_t *mtx)
  824 {
  825 
  826         if (mtx == NULL)
  827                 return 0;
  828         if (MUTEX_ADAPTIVE_P(mtx->mtx_owner))
  829                 return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
  830 #ifdef FULL
  831         return MUTEX_SPINBIT_LOCKED_P(mtx);
  832 #else
  833         return 1;
  834 #endif
  835 }
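
The usual consumer of mutex_owned() is an assertion that a lock is held on
entry to a function, rather than a substitute for taking the lock.  A hedged
example (the function and softc are hypothetical):

static void
example_update_state(struct example_softc *sc)
{

        KASSERT(mutex_owned(&sc->sc_lock));
        /* ... modify state protected by sc_lock ... */
}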
  836 
  837 /*
  838  * mutex_owner:
  839  *
  840  *      Return the current owner of an adaptive mutex.  Used for
  841  *      priority inheritance.
  842  */
  843 lwp_t *
  844 mutex_owner(const kmutex_t *mtx)
  845 {
  846 
  847         MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx->mtx_owner));
  848         return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner);
  849 }
  850 
  851 /*
  852  * mutex_owner_running:
  853  *
  854  *      Return true if an adaptive mutex is unheld, or held and the owner is
  855  *      running on a CPU.  For the pagedaemon only - do not document or use
  856  *      in other code.
  857  */
  858 bool
  859 mutex_owner_running(const kmutex_t *mtx)
  860 {
  861 #ifdef MULTIPROCESSOR
  862         uintptr_t owner;
  863         bool rv;
  864 
  865         MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx->mtx_owner));
  866         kpreempt_disable();
  867         owner = mtx->mtx_owner;
  868         rv = !MUTEX_OWNED(owner) || mutex_oncpu(MUTEX_OWNER(owner));
  869         kpreempt_enable();
  870         return rv;
  871 #else
  872         return mutex_owner(mtx) == curlwp;
  873 #endif
  874 }
  875 
  876 /*
  877  * mutex_ownable:
  878  *
  879  *      When compiled with DEBUG and LOCKDEBUG defined, ensure that
  880  *      the mutex is available.  We cannot use !mutex_owned() since
  881  *      that won't work correctly for spin mutexes.
  882  */
  883 int
  884 mutex_ownable(const kmutex_t *mtx)
  885 {
  886 
  887 #ifdef LOCKDEBUG
  888         MUTEX_TESTLOCK(mtx);
  889 #endif
  890         return 1;
  891 }
  892 
  893 /*
  894  * mutex_tryenter:
  895  *
  896  *      Try to acquire the mutex; return non-zero if we did.
  897  */
  898 int
  899 mutex_tryenter(kmutex_t *mtx)
  900 {
  901         uintptr_t curthread;
  902 
  903         /*
  904          * Handle spin mutexes.
  905          */
  906         if (MUTEX_SPIN_P(mtx->mtx_owner)) {
  907                 MUTEX_SPIN_SPLRAISE(mtx);
  908 #ifdef FULL
  909                 if (MUTEX_SPINBIT_LOCK_TRY(mtx)) {
  910                         MUTEX_WANTLOCK(mtx);
  911                         MUTEX_LOCKED(mtx);
  912                         return 1;
  913                 }
  914                 MUTEX_SPIN_SPLRESTORE(mtx);
  915 #else
  916                 MUTEX_WANTLOCK(mtx);
  917                 MUTEX_LOCKED(mtx);
  918                 return 1;
  919 #endif
  920         } else {
  921                 curthread = (uintptr_t)curlwp;
  922                 MUTEX_ASSERT(mtx, curthread != 0);
  923                 if (MUTEX_ACQUIRE(mtx, curthread)) {
  924                         MUTEX_WANTLOCK(mtx);
  925                         MUTEX_LOCKED(mtx);
  926                         MUTEX_DASSERT(mtx,
  927                             MUTEX_OWNER(mtx->mtx_owner) == curthread);
  928                         return 1;
  929                 }
  930         }
  931 
  932         return 0;
  933 }
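
A typical mutex_tryenter() caller attempts the lock opportunistically and
defers the work rather than sleeping, for example when called from a context
that must not block.  A hedged sketch (the names are hypothetical):

static bool
example_try_flush(struct example_softc *sc)
{

        if (!mutex_tryenter(&sc->sc_lock))
                return false;           /* busy: caller retries later */
        /* ... do the work protected by sc_lock ... */
        mutex_exit(&sc->sc_lock);
        return true;
}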
  934 
  935 #if defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL)
  936 /*
  937  * mutex_spin_retry:
  938  *
  939  *      Support routine for mutex_spin_enter().  Assumes that the caller
  940  *      has already raised the SPL, and adjusted counters.
  941  */
  942 void
  943 mutex_spin_retry(kmutex_t *mtx)
  944 {
  945 #ifdef MULTIPROCESSOR
  946         u_int count;
  947         LOCKSTAT_TIMER(spintime);
  948         LOCKSTAT_FLAG(lsflag);
  949 #ifdef LOCKDEBUG
  950         u_int spins = 0;
  951 #endif  /* LOCKDEBUG */
  952 
  953         MUTEX_WANTLOCK(mtx);
  954 
  955         LOCKSTAT_ENTER(lsflag);
  956         LOCKSTAT_START_TIMER(lsflag, spintime);
  957         count = SPINLOCK_BACKOFF_MIN;
  958 
  959         /*
  960          * Spin on the lock word, using exponential backoff
  961          * to reduce cache line ping-ponging between CPUs.
  962          */
  963         do {
  964                 while (MUTEX_SPINBIT_LOCKED_P(mtx)) {
  965                         SPINLOCK_BACKOFF(count);
  966 #ifdef LOCKDEBUG
  967                         if (SPINLOCK_SPINOUT(spins))
  968                                 MUTEX_ABORT(mtx, "spinout");
  969 #endif  /* LOCKDEBUG */
  970                 }
  971         } while (!MUTEX_SPINBIT_LOCK_TRY(mtx));
  972 
  973         LOCKSTAT_STOP_TIMER(lsflag, spintime);
  974         LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
  975         LOCKSTAT_EXIT(lsflag);
  976 
  977         MUTEX_LOCKED(mtx);
  978 #else   /* MULTIPROCESSOR */
  979         MUTEX_ABORT(mtx, "locking against myself");
  980 #endif  /* MULTIPROCESSOR */
  981 }
  982 #endif  /* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */
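
The spin loops above (here and in mutex_vector_enter()) follow the same
test-and-test-and-set pattern: poll the lock word with plain reads until it
looks free, back off for exponentially longer between polls to reduce cache
line ping-ponging, and only then attempt the atomic operation.  A generic,
self-contained sketch of that pattern using C11 atomics rather than the
kernel's SPINLOCK_* macros:

#include <stdatomic.h>
#include <stdbool.h>

#define SKETCH_BACKOFF_MIN      4u
#define SKETCH_BACKOFF_MAX      128u

static void
sketch_spin_acquire(_Atomic bool *lock)
{
        unsigned count = SKETCH_BACKOFF_MIN;

        for (;;) {
                /* Poll with plain loads while the lock looks held, waiting
                 * exponentially longer between polls. */
                while (atomic_load_explicit(lock, memory_order_relaxed)) {
                        for (volatile unsigned i = 0; i < count; i++)
                                continue;       /* crude delay loop */
                        if (count < SKETCH_BACKOFF_MAX)
                                count += count;
                }
                /* The lock looked free: try the atomic acquire. */
                if (!atomic_exchange_explicit(lock, true,
                    memory_order_acquire))
                        return;
        }
}

SPINLOCK_BACKOFF() in the kernel additionally issues a CPU pause/yield hint
where one exists, and under LOCKDEBUG the SPINLOCK_SPINOUT() check turns an
unbounded wait into a "spinout" panic instead of a silent hang.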
