FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_sx.c


    1 /*
    2  * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>.  All rights reserved.
    3  *
    4  * Redistribution and use in source and binary forms, with or without
    5  * modification, are permitted provided that the following conditions
    6  * are met:
    7  * 1. Redistributions of source code must retain the above copyright
    8  *    notice(s), this list of conditions and the following disclaimer as
    9  *    the first lines of this file unmodified other than the possible 
   10  *    addition of one or more copyright notices.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice(s), this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
   16  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   18  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
   19  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
   22  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
   25  * DAMAGE.
   26  *
   27  * $FreeBSD: releng/5.1/sys/kern/kern_sx.c 93812 2002-04-04 20:49:35Z jhb $
   28  */
   29 
   30 /*
   31  * Shared/exclusive locks.  This implementation assures deterministic lock
   32  * granting behavior, so that slocks and xlocks are interleaved.
   33  *
   34  * Priority propagation will not generally raise the priority of lock holders,
   35  * so should not be relied upon in combination with sx locks.
   36  */
   37 
   38 #include <sys/param.h>
   39 #include <sys/systm.h>
   40 #include <sys/ktr.h>
   41 #include <sys/condvar.h>
   42 #include <sys/lock.h>
   43 #include <sys/mutex.h>
   44 #include <sys/sx.h>
   45 
   46 struct lock_class lock_class_sx = {
   47         "sx",
   48         LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE
   49 };
   50 
   51 #ifndef INVARIANTS
   52 #define _sx_assert(sx, what, file, line)
   53 #endif
   54 
   55 void
   56 sx_sysinit(void *arg)
   57 {
   58         struct sx_args *sargs = arg;
   59 
   60         sx_init(sargs->sa_sx, sargs->sa_desc);
   61 }
   62 
   63 void
   64 sx_init(struct sx *sx, const char *description)
   65 {
   66         struct lock_object *lock;
   67 
   68         lock = &sx->sx_object;
   69         KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
   70             ("sx lock %s %p already initialized", description, sx));
   71         bzero(sx, sizeof(*sx));
   72         lock->lo_class = &lock_class_sx;
   73         lock->lo_type = lock->lo_name = description;
   74         lock->lo_flags = LO_WITNESS | LO_RECURSABLE | LO_SLEEPABLE |
   75             LO_UPGRADABLE;
   76         sx->sx_lock = mtx_pool_find(sx);
   77         sx->sx_cnt = 0;
   78         cv_init(&sx->sx_shrd_cv, description);
   79         sx->sx_shrd_wcnt = 0;
   80         cv_init(&sx->sx_excl_cv, description);
   81         sx->sx_excl_wcnt = 0;
   82         sx->sx_xholder = NULL;
   83 
   84         LOCK_LOG_INIT(lock, 0);
   85 
   86         WITNESS_INIT(lock);
   87 }
   88 
   89 void
   90 sx_destroy(struct sx *sx)
   91 {
   92 
   93         LOCK_LOG_DESTROY(&sx->sx_object, 0);
   94 
   95         KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
   96             0), ("%s (%s): holders or waiters\n", __func__,
   97             sx->sx_object.lo_name));
   98 
   99         sx->sx_lock = NULL;
  100         cv_destroy(&sx->sx_shrd_cv);
  101         cv_destroy(&sx->sx_excl_cv);
  102 
  103         WITNESS_DESTROY(&sx->sx_object);
  104 }
  105 
  106 void
  107 _sx_slock(struct sx *sx, const char *file, int line)
  108 {
  109 
  110         mtx_lock(sx->sx_lock);
  111         KASSERT(sx->sx_xholder != curthread,
  112             ("%s (%s): slock while xlock is held @ %s:%d\n", __func__,
  113             sx->sx_object.lo_name, file, line));
  114 
  115         /*
  116          * Loop in case we lose the race for lock acquisition.
  117          */
  118         while (sx->sx_cnt < 0) {
  119                 sx->sx_shrd_wcnt++;
  120                 cv_wait(&sx->sx_shrd_cv, sx->sx_lock);
  121                 sx->sx_shrd_wcnt--;
  122         }
  123 
  124         /* Acquire a shared lock. */
  125         sx->sx_cnt++;
  126 
  127         LOCK_LOG_LOCK("SLOCK", &sx->sx_object, 0, 0, file, line);
  128         WITNESS_LOCK(&sx->sx_object, 0, file, line);
  129 
  130         mtx_unlock(sx->sx_lock);
  131 }
  132 
  133 int
  134 _sx_try_slock(struct sx *sx, const char *file, int line)
  135 {
  136 
  137         mtx_lock(sx->sx_lock);
  138         if (sx->sx_cnt >= 0) {
  139                 sx->sx_cnt++;
  140                 LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 1, file, line);
  141                 WITNESS_LOCK(&sx->sx_object, LOP_TRYLOCK, file, line);
  142                 mtx_unlock(sx->sx_lock);
  143                 return (1);
  144         } else {
  145                 LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 0, file, line);
  146                 mtx_unlock(sx->sx_lock);
  147                 return (0);
  148         }
  149 }
  150 
  151 void
  152 _sx_xlock(struct sx *sx, const char *file, int line)
  153 {
  154 
  155         mtx_lock(sx->sx_lock);
  156 
  157         /*
  158          * With sx locks, we're absolutely not permitted to recurse on
  159          * xlocks, as it is fatal (deadlock). Normally, recursion is handled
  160          * by WITNESS, but as it is not semantically correct to hold the
  161          * xlock while in here, we consider it API abuse and put it under
  162          * INVARIANTS.
  163          */
  164         KASSERT(sx->sx_xholder != curthread,
  165             ("%s (%s): xlock already held @ %s:%d", __func__,
  166             sx->sx_object.lo_name, file, line));
  167 
  168         /* Loop in case we lose the race for lock acquisition. */
  169         while (sx->sx_cnt != 0) {
  170                 sx->sx_excl_wcnt++;
  171                 cv_wait(&sx->sx_excl_cv, sx->sx_lock);
  172                 sx->sx_excl_wcnt--;
  173         }
  174 
  175         MPASS(sx->sx_cnt == 0);
  176 
  177         /* Acquire an exclusive lock. */
  178         sx->sx_cnt--;
  179         sx->sx_xholder = curthread;
  180 
  181         LOCK_LOG_LOCK("XLOCK", &sx->sx_object, 0, 0, file, line);
  182         WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);
  183 
  184         mtx_unlock(sx->sx_lock);
  185 }
  186 
  187 int
  188 _sx_try_xlock(struct sx *sx, const char *file, int line)
  189 {
  190 
  191         mtx_lock(sx->sx_lock);
  192         if (sx->sx_cnt == 0) {
  193                 sx->sx_cnt--;
  194                 sx->sx_xholder = curthread;
  195                 LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 1, file, line);
  196                 WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file,
  197                     line);
  198                 mtx_unlock(sx->sx_lock);
  199                 return (1);
  200         } else {
  201                 LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 0, file, line);
  202                 mtx_unlock(sx->sx_lock);
  203                 return (0);
  204         }
  205 }
  206 
  207 void
  208 _sx_sunlock(struct sx *sx, const char *file, int line)
  209 {
  210 
  211         _sx_assert(sx, SX_SLOCKED, file, line);
  212         mtx_lock(sx->sx_lock);
  213 
  214         WITNESS_UNLOCK(&sx->sx_object, 0, file, line);
  215 
  216         /* Release. */
  217         sx->sx_cnt--;
  218 
  219         /*
  220          * If we just released the last shared lock, wake any waiters up, giving
  221          * exclusive lockers precedence.  In order to make sure that exclusive
  222          * lockers won't be blocked forever, don't wake shared lock waiters if
  223          * there are exclusive lock waiters.
  224          */
  225         if (sx->sx_excl_wcnt > 0) {
  226                 if (sx->sx_cnt == 0)
  227                         cv_signal(&sx->sx_excl_cv);
  228         } else if (sx->sx_shrd_wcnt > 0)
  229                 cv_broadcast(&sx->sx_shrd_cv);
  230 
  231         LOCK_LOG_LOCK("SUNLOCK", &sx->sx_object, 0, 0, file, line);
  232 
  233         mtx_unlock(sx->sx_lock);
  234 }
  235 
  236 void
  237 _sx_xunlock(struct sx *sx, const char *file, int line)
  238 {
  239 
  240         _sx_assert(sx, SX_XLOCKED, file, line);
  241         mtx_lock(sx->sx_lock);
  242         MPASS(sx->sx_cnt == -1);
  243 
  244         WITNESS_UNLOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);
  245 
  246         /* Release. */
  247         sx->sx_cnt++;
  248         sx->sx_xholder = NULL;
  249 
  250         /*
  251          * Wake up waiters if there are any.  Give precedence to slock waiters.
  252          */
  253         if (sx->sx_shrd_wcnt > 0)
  254                 cv_broadcast(&sx->sx_shrd_cv);
  255         else if (sx->sx_excl_wcnt > 0)
  256                 cv_signal(&sx->sx_excl_cv);
  257 
  258         LOCK_LOG_LOCK("XUNLOCK", &sx->sx_object, 0, 0, file, line);
  259 
  260         mtx_unlock(sx->sx_lock);
  261 }
  262 
  263 int
  264 _sx_try_upgrade(struct sx *sx, const char *file, int line)
  265 {
  266 
  267         _sx_assert(sx, SX_SLOCKED, file, line);
  268         mtx_lock(sx->sx_lock);
  269 
  270         if (sx->sx_cnt == 1) {
  271                 sx->sx_cnt = -1;
  272                 sx->sx_xholder = curthread;
  273 
  274                 LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 1, file, line);
  275                 WITNESS_UPGRADE(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
  276                     file, line);
  277 
  278                 mtx_unlock(sx->sx_lock);
  279                 return (1);
  280         } else {
  281                 LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 0, file, line);
  282                 mtx_unlock(sx->sx_lock);
  283                 return (0);
  284         }
  285 }
  286 
  287 void
  288 _sx_downgrade(struct sx *sx, const char *file, int line)
  289 {
  290 
  291         _sx_assert(sx, SX_XLOCKED, file, line);
  292         mtx_lock(sx->sx_lock);
  293         MPASS(sx->sx_cnt == -1);
  294 
  295         WITNESS_DOWNGRADE(&sx->sx_object, 0, file, line);
  296 
  297         sx->sx_cnt = 1;
  298         sx->sx_xholder = NULL;
  299         if (sx->sx_shrd_wcnt > 0)
  300                 cv_broadcast(&sx->sx_shrd_cv);
  301 
  302         LOCK_LOG_LOCK("XDOWNGRADE", &sx->sx_object, 0, 0, file, line);
  303 
  304         mtx_unlock(sx->sx_lock);
  305 }
  306 
  307 #ifdef INVARIANT_SUPPORT
  308 #ifndef INVARIANTS
  309 #undef  _sx_assert
  310 #endif
  311 
  312 /*
  313  * In the non-WITNESS case, sx_assert() can only detect that at least
  314  * *some* thread owns an slock, but it cannot guarantee that *this*
  315  * thread owns an slock.
  316  */
  317 void
  318 _sx_assert(struct sx *sx, int what, const char *file, int line)
  319 {
  320 
  321         switch (what) {
  322         case SX_LOCKED:
  323         case SX_SLOCKED:
  324 #ifdef WITNESS
  325                 witness_assert(&sx->sx_object, what, file, line);
  326 #else
  327                 mtx_lock(sx->sx_lock);
  328                 if (sx->sx_cnt <= 0 &&
  329                     (what == SX_SLOCKED || sx->sx_xholder != curthread))
  330                         printf("Lock %s not %slocked @ %s:%d\n",
  331                             sx->sx_object.lo_name, (what == SX_SLOCKED) ?
  332                             "share " : "", file, line);
  333                 mtx_unlock(sx->sx_lock);
  334 #endif
  335                 break;
  336         case SX_XLOCKED:
  337                 mtx_lock(sx->sx_lock);
  338                 if (sx->sx_xholder != curthread)
  339                         printf("Lock %s not exclusively locked @ %s:%d\n",
  340                             sx->sx_object.lo_name, file, line);
  341                 mtx_unlock(sx->sx_lock);
  342                 break;
  343         default:
  344                 panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
  345                     line);
  346         }
  347 }
  348 #endif  /* INVARIANT_SUPPORT */
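
The underscore-prefixed functions above are not normally called directly; <sys/sx.h> wraps them in the sx(9) interface (sx_slock(), sx_xlock(), and so on), which supplies the file/line arguments used by WITNESS and the lock logging code. The sketch below shows how a subsystem might protect a simple counter with one of these locks; it is illustrative only, and the foo_* names are hypothetical, not part of the kernel source.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>

static struct sx foo_lock;
static int foo_count;

static void
foo_setup(void)
{

	/* Initialize once before first use (e.g. from a SYSINIT). */
	sx_init(&foo_lock, "foo counter");
}

static int
foo_read(void)
{
	int snapshot;

	/* Readers take the shared side; many may hold it at once. */
	sx_slock(&foo_lock);
	snapshot = foo_count;
	sx_sunlock(&foo_lock);
	return (snapshot);
}

static void
foo_increment(void)
{

	/* Writers take the exclusive side; sleeping while held is allowed. */
	sx_xlock(&foo_lock);
	foo_count++;
	sx_xunlock(&foo_lock);
}

static void
foo_teardown(void)
{

	/* Legal only once there are no holders or waiters left. */
	sx_destroy(&foo_lock);
}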

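Since _sx_try_upgrade() only succeeds when the caller is the sole shared holder (sx_cnt == 1), code that discovers under an slock that it needs to modify data usually falls back to dropping the shared lock, taking the exclusive lock, and re-checking its precondition, because the state may have changed while no lock was held. A sketch of that pattern, reusing the hypothetical foo_lock and foo_count from the previous example:

static void
foo_fix_if_negative(void)
{

	sx_slock(&foo_lock);
	if (foo_count < 0) {
		/*
		 * Upgrade in place if we are the only shared holder;
		 * otherwise drop the slock, take an xlock, and re-check,
		 * since another thread may have run in the window.
		 */
		if (sx_try_upgrade(&foo_lock) == 0) {
			sx_sunlock(&foo_lock);
			sx_xlock(&foo_lock);
		}
		if (foo_count < 0)
			foo_count = 0;
		/* Hand the lock back to readers without a full release. */
		sx_downgrade(&foo_lock);
	}
	sx_sunlock(&foo_lock);
}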