FreeBSD/Linux Kernel Cross Reference
sys/sys/mutex2.h

/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_MUTEX2_H_
#define _SYS_MUTEX2_H_

#ifndef _SYS_MUTEX_H_
#include <sys/mutex.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>

/*
 * Initialize a new mutex, placing it in an unlocked state with no refs.
 */
static __inline void
mtx_init(mtx_t mtx)
{
        mtx->mtx_lock = 0;
        mtx->mtx_refs = 0;
        mtx->mtx_owner = NULL;
        mtx->mtx_link = NULL;
}

static __inline void
mtx_link_init(mtx_link_t link)
{
        link->state = MTX_LINK_IDLE;
}

/*
 * Deinitialize a mutex.
 */
static __inline void
mtx_uninit(mtx_t mtx)
{
        /* empty */
}

/*
 * Exclusive-lock a mutex, block until acquired or aborted.  Recursion
 * is allowed.
 *
 * This version of the function allows the mtx_link to be passed in, thus
 * giving the caller visibility into the link structure, which is required
 * when calling mtx_abort_ex_link().
 *
 * The lock attempt may be aborted at any time while the passed link
 * structure is valid.
 */
static __inline int
mtx_lock_ex_link(mtx_t mtx, struct mtx_link *link,
                 const char *ident, int flags, int to)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return(_mtx_lock_ex_link(mtx, link, ident, flags, to));
        mtx->mtx_owner = curthread;
        return(0);
}
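
/*
 * Usage sketch (editor's illustration; 'my_mtx' and the surrounding code
 * are hypothetical, and the mtx_abort_ex_link() call pattern is an
 * assumption based on the comment above):
 *
 *        struct mtx_link link;
 *        int error;
 *
 *        mtx_link_init(&link);
 *        error = mtx_lock_ex_link(&my_mtx, &link, "myex", PCATCH, 0);
 *        if (error == 0) {
 *                ... critical section ...
 *                mtx_unlock(&my_mtx);
 *        }
 *
 * While the blocked caller's link structure remains valid, another thread
 * that can see 'link' may abort the pending lock attempt via
 * mtx_abort_ex_link(), causing mtx_lock_ex_link() to return an error
 * instead of the lock.
 */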

/*
 * Short-form exclusive-lock a mutex, block until acquired.  Recursion is
 * allowed.  This is equivalent to mtx_lock_ex(mtx, "mtxex", 0, 0).
 */
static __inline void
mtx_lock(mtx_t mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0) {
                _mtx_lock_ex(mtx, "mtxex", 0, 0);
                return;
        }
        mtx->mtx_owner = curthread;
}
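
/*
 * Basic lifecycle sketch (editor's illustration; 'my_mtx' and the
 * surrounding code are hypothetical).  Note that the short-form
 * mtx_lock() pairs with the generic mtx_unlock():
 *
 *        struct mtx my_mtx;
 *
 *        mtx_init(&my_mtx);
 *        mtx_lock(&my_mtx);
 *        ... critical section ...
 *        mtx_unlock(&my_mtx);
 *        mtx_uninit(&my_mtx);
 */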

/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_ex(mtx_t mtx, const char *ident, int flags, int to)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return(_mtx_lock_ex(mtx, ident, flags, to));
        mtx->mtx_owner = curthread;
        return(0);
}
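
/*
 * Error-handling sketch (editor's illustration; 'my_mtx' is hypothetical).
 * With PCATCH in the flags a pending signal can abort the sleep, in which
 * case the tsleep() error (e.g. EINTR or ERESTART) is returned and the
 * lock was NOT acquired:
 *
 *        error = mtx_lock_ex(&my_mtx, "myex", PCATCH, 0);
 *        if (error)
 *                return (error);
 *        ... critical section ...
 *        mtx_unlock(&my_mtx);
 */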

static __inline int
mtx_lock_ex_quick(mtx_t mtx, const char *ident)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return(_mtx_lock_ex_quick(mtx, ident));
        mtx->mtx_owner = curthread;
        return(0);
}

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
                return(_mtx_lock_sh(mtx, ident, flags, to));
        return(0);
}

static __inline int
mtx_lock_sh_quick(mtx_t mtx, const char *ident)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
                return(_mtx_lock_sh_quick(mtx, ident));
        return(0);
}

/*
 * Short-form exclusive-spinlock a mutex.  Must be paired with
 * mtx_spinunlock().
 */
static __inline void
mtx_spinlock(mtx_t mtx)
{
        globaldata_t gd = mycpu;

        /*
         * Predispose a hard critical section
         */
        ++gd->gd_curthread->td_critcount;
        cpu_ccfence();
        ++gd->gd_spinlocks;

        /*
         * If we cannot get it trivially, get it the hard way.
         *
         * Note that mtx_owner will be set twice if we fail to get it
         * trivially, but there's no point conditionalizing it as a
         * conditional would be slower.
         */
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                _mtx_spinlock(mtx);
        mtx->mtx_owner = gd->gd_curthread;
}

static __inline int
mtx_spinlock_try(mtx_t mtx)
{
        globaldata_t gd = mycpu;

        /*
         * Predispose a hard critical section
         */
        ++gd->gd_curthread->td_critcount;
        cpu_ccfence();
        ++gd->gd_spinlocks;

        /*
         * If we cannot get it trivially, call _mtx_spinlock_try().  This
         * function will clean up the hard critical section if it fails.
         */
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return(_mtx_spinlock_try(mtx));
        mtx->mtx_owner = gd->gd_curthread;
        return (0);
}

/*
 * Attempt to exclusive-lock a mutex without blocking, return 0 on
 * success and EAGAIN on failure.  Recursion is allowed.
 */
static __inline int
mtx_lock_ex_try(mtx_t mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return (_mtx_lock_ex_try(mtx));
        mtx->mtx_owner = curthread;
        return (0);
}
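
/*
 * Try-lock sketch (editor's illustration; 'my_mtx' and the fallback path
 * are hypothetical).  The try forms return EAGAIN instead of blocking,
 * which is useful when the caller already holds another lock and cannot
 * risk sleeping or deadlocking:
 *
 *        if (mtx_lock_ex_try(&my_mtx) == 0) {
 *                ... critical section ...
 *                mtx_unlock(&my_mtx);
 *        } else {
 *                ... take a slower fallback path ...
 *        }
 */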

/*
 * Attempt to share-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_sh_try(mtx_t mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
                return (_mtx_lock_sh_try(mtx));
        return (0);
}

/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
static __inline void
mtx_downgrade(mtx_t mtx)
{
        mtx->mtx_owner = NULL;

        /*
         * Fast path: convert an exclusive count of 1 directly to a
         * shared count of 1, otherwise take the slow path.
         */
        if (atomic_cmpset_int(&mtx->mtx_lock, MTX_EXCLUSIVE | 1, 1) == 0)
                _mtx_downgrade(mtx);
}

/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
static __inline int
mtx_upgrade_try(mtx_t mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 1, MTX_EXCLUSIVE | 1)) {
                mtx->mtx_owner = curthread;
                return(0);
        }
        return (_mtx_upgrade_try(mtx));
}
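
/*
 * Lock-promotion sketch (editor's illustration; 'my_mtx', revalidate()
 * and modify() are hypothetical).  A shared holder that discovers it
 * must modify the protected state can attempt an in-place upgrade; if
 * the upgrade fails with EDEADLK the lock must be dropped, re-acquired
 * exclusively, and the state revalidated:
 *
 *        mtx_lock_sh_quick(&my_mtx, "myshr");
 *        if (mtx_upgrade_try(&my_mtx) != 0) {
 *                mtx_unlock_sh(&my_mtx);
 *                mtx_lock_ex_quick(&my_mtx, "myex");
 *                revalidate();
 *        }
 *        modify();
 *        mtx_downgrade(&my_mtx);
 *        ... read-only continuation ...
 *        mtx_unlock_sh(&my_mtx);
 */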

/*
 * Optimized unlock cases.
 *
 * NOTE: mtx_unlock() handles any type of mutex: exclusive, shared, and
 *       both blocking and spin methods.
 *
 *       The mtx_unlock_ex/sh() forms are optimized for exclusive or shared
 *       mutexes and produce less code, but it is ok for code to just use
 *       mtx_unlock() and, in fact, if code uses the short-form mtx_lock()
 *       or mtx_spinlock() to lock it should also use mtx_unlock() to unlock.
 */
static __inline void
mtx_unlock(mtx_t mtx)
{
        u_int lock = mtx->mtx_lock;

        if (lock == (MTX_EXCLUSIVE | 1)) {
                mtx->mtx_owner = NULL;
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
                        _mtx_unlock(mtx);
        } else if (lock == 1) {
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
                        _mtx_unlock(mtx);
        } else {
                _mtx_unlock(mtx);
        }
}

static __inline void
mtx_unlock_ex(mtx_t mtx)
{
        u_int lock = mtx->mtx_lock;

        if (lock == (MTX_EXCLUSIVE | 1)) {
                mtx->mtx_owner = NULL;
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
                        _mtx_unlock(mtx);
        } else {
                _mtx_unlock(mtx);
        }
}

static __inline void
mtx_unlock_sh(mtx_t mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 1, 0) == 0)
                _mtx_unlock(mtx);
}

/*
 * NOTE: spinlocks are exclusive-only
 */
static __inline void
mtx_spinunlock(mtx_t mtx)
{
        globaldata_t gd = mycpu;

        mtx_unlock(mtx);

        --gd->gd_spinlocks;
        cpu_ccfence();
        --gd->gd_curthread->td_critcount;
}
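
/*
 * Pairing sketch (editor's illustration; 'my_mtx' is hypothetical).
 * mtx_spinlock() predisposes a hard critical section and bumps the
 * per-cpu spinlock count, and mtx_spinunlock() undoes both, so the two
 * must always be paired and the critical section must not block:
 *
 *        mtx_spinlock(&my_mtx);
 *        ... short, non-blocking critical section ...
 *        mtx_spinunlock(&my_mtx);
 */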

/*
 * Return TRUE (non-zero) if the mutex is locked shared or exclusive by
 * anyone, including the owner.
 */
static __inline int
mtx_islocked(mtx_t mtx)
{
        return(mtx->mtx_lock != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is locked exclusively by anyone,
 * including the owner.
 *
 * The mutex may be in an unlocked or shared-locked state.
 */
static __inline int
mtx_islocked_ex(mtx_t mtx)
{
        return((mtx->mtx_lock & MTX_EXCLUSIVE) != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked.
 */
static __inline int
mtx_notlocked(mtx_t mtx)
{
        return(mtx->mtx_lock == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked exclusively.
 * The mutex may be in an unlocked or shared-locked state.
 */
static __inline int
mtx_notlocked_ex(mtx_t mtx)
{
        return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is exclusively locked by
 * the caller.
 */
static __inline int
mtx_owned(mtx_t mtx)
{
        return((mtx->mtx_lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread);
}
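
/*
 * Assertion sketch (editor's illustration; 'my_mtx' is hypothetical).
 * mtx_owned() is typically used to assert that the caller holds the
 * lock exclusively before touching protected state, e.g.:
 *
 *        KKASSERT(mtx_owned(&my_mtx));
 */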

/*
 * Return TRUE (non-zero) if the mutex is not exclusively locked by
 * the caller.
 */
static __inline int
mtx_notowned(mtx_t mtx)
{
        return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
               mtx->mtx_owner != curthread);
}

/*
 * Return the shared or exclusive lock count.  A return value of 0
 * indicates that the mutex is not locked.
 *
 * NOTE: If the mutex is held exclusively by someone other than the
 *       caller the lock count for the other owner is still returned.
 */
static __inline int
mtx_lockrefs(mtx_t mtx)
{
        return(mtx->mtx_lock & MTX_MASK);
}

/*
 * Bump the lock's ref count.  This field is independent of the lock.
 */
static __inline void
mtx_hold(mtx_t mtx)
{
        atomic_add_acq_int(&mtx->mtx_refs, 1);
}

/*
 * Drop the lock's ref count.  This field is independent of the lock.
 *
 * Returns the previous ref count, interlocked so testing against
 * 1 means you won the 1->0 transition.
 */
static __inline int
mtx_drop(mtx_t mtx)
{
        return (atomic_fetchadd_int(&mtx->mtx_refs, -1));
}
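
/*
 * Refcount sketch (editor's illustration; 'obj' and free_object() are
 * hypothetical).  Because mtx_drop() returns the ref count prior to the
 * decrement, the thread that sees 1 won the 1->0 transition and may
 * safely tear down the structure embedding the mutex:
 *
 *        mtx_hold(&obj->mtx);
 *        ...
 *        if (mtx_drop(&obj->mtx) == 1)
 *                free_object(obj);
 */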

#endif	/* _SYS_MUTEX2_H_ */
