
FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_localcount.c


/*      $NetBSD: subr_localcount.c,v 1.7 2017/11/17 09:26:36 ozaki-r Exp $      */

/*-
 * Copyright (c) 2016 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * CPU-local reference counts
 *
 *      localcount(9) is a reference-counting scheme that involves no
 *      interprocessor synchronization most of the time, at the cost of
 *      eight bytes of memory per CPU per object and at the cost of
 *      expensive interprocessor synchronization to drain references.
 *
 *      localcount(9) references may be held across sleeps and may be
 *      transferred from CPU to CPU or thread to thread: they behave
 *      semantically like typical reference counts, with different
 *      pragmatic performance characteristics.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_localcount.c,v 1.7 2017/11/17 09:26:36 ozaki-r Exp $");

#include <sys/param.h>
#include <sys/localcount.h>
#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/errno.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/xcall.h>
#if defined(DEBUG) && defined(LOCKDEBUG)
#include <sys/atomic.h>
#endif

static void localcount_xc(void *, void *);

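/*
 * For reference, a sketch of the object these routines operate on.
 * This is not a new definition -- the authoritative one lives in
 * <sys/localcount.h> -- but the field accesses below imply a layout
 * along these lines:
 *
 *      struct localcount {
 *              int64_t         *lc_totalp;     // non-NULL once draining
 *              struct percpu   *lc_percpu;     // int64_t counter per CPU
 *      #if defined(DEBUG) && defined(LOCKDEBUG)
 *              uint32_t        lc_refcnt;      // debug-only global count
 *      #endif
 *      };
 */
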
/*
 * localcount_init(lc)
 *
 *      Initialize a localcount object.  Allocates the per-CPU counter
 *      storage with percpu(9).
 *
 *      The caller must call localcount_drain and then localcount_fini
 *      when done with lc.
 */
void
localcount_init(struct localcount *lc)
{

        lc->lc_totalp = NULL;
        lc->lc_percpu = percpu_alloc(sizeof(int64_t));
}
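
/*
 * A minimal usage sketch of the setup side.  The frobdev names, the
 * softc layout, and the wait-channel string are hypothetical; only the
 * localcount(9), mutex(9), and condvar(9) calls are taken from this
 * file and their manual pages.
 */
#if 0   /* example only, not compiled */
struct frobdev_softc {
        LIST_ENTRY(frobdev_softc) sc_entry;     /* linkage for lookup list */
        int                     sc_unit;
        struct localcount       sc_refs;        /* references to this softc */
        kmutex_t                sc_lock;        /* interlock for draining */
        kcondvar_t              sc_cv;          /* wakes the drainer */
};

static void
frobdev_attach(struct frobdev_softc *sc)
{

        mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
        cv_init(&sc->sc_cv, "frobdrn");
        localcount_init(&sc->sc_refs);
        /* ...publish sc on the list where readers can find it... */
}
#endif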

/*
 * localcount_drain(lc, cv, interlock)
 *
 *      Wait for all acquired references to lc to drain.  Caller must
 *      hold interlock; localcount_drain releases it during cross-calls
 *      and waits on cv.  The cv and interlock passed here must be the
 *      same as are passed to localcount_release for this lc.
 *
 *      Caller must guarantee that no new references can be acquired
 *      with localcount_acquire before calling localcount_drain.  For
 *      example, any object that may be found in a list and acquired
 *      must be removed from the list before localcount_drain.
 *
 *      The localcount object lc may be used only with localcount_fini
 *      after this, unless reinitialized after localcount_fini with
 *      localcount_init.
 */
void
localcount_drain(struct localcount *lc, kcondvar_t *cv, kmutex_t *interlock)
{
        int64_t total = 0;

        KASSERT(mutex_owned(interlock));
        KASSERT(lc->lc_totalp == NULL);

        /* Mark it draining.  */
        lc->lc_totalp = &total;

        /*
         * Count up all references on all CPUs.
         *
         * This serves as a global memory barrier: after xc_wait, all
         * CPUs will have witnessed the nonnull value of lc->lc_totalp,
         * so that it is safe to wait on the cv for them.
         */
        mutex_exit(interlock);
        xc_wait(xc_broadcast(0, &localcount_xc, lc, interlock));
        mutex_enter(interlock);

        /* Wait for remaining references to drain.  */
        while (total != 0) {
                /*
                 * At this point, now that we have added up all
                 * references on all CPUs, the total had better be
                 * nonnegative.
                 */
                KASSERTMSG((0 < total),
                    "negatively referenced localcount: %p, %"PRId64,
                    lc, total);
                cv_wait(cv, interlock);
        }

        /* Paranoia: Cause any further use of lc->lc_totalp to crash.  */
        lc->lc_totalp = (void *)(uintptr_t)1;
}
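
/*
 * The matching teardown sketch: unpublish the object first so no new
 * references can be acquired, then drain with the same cv and
 * interlock that localcount_release uses.  The frobdev names remain
 * hypothetical, as above.
 */
#if 0   /* example only, not compiled */
static void
frobdev_detach(struct frobdev_softc *sc)
{

        /*
         * Unlink sc from the list readers search, and wait for
         * existing readers to finish, e.g. with pserialize_perform(9),
         * so that localcount_acquire can no longer be reached.
         */

        mutex_enter(&sc->sc_lock);
        localcount_drain(&sc->sc_refs, &sc->sc_cv, &sc->sc_lock);
        mutex_exit(&sc->sc_lock);

        localcount_fini(&sc->sc_refs);
        cv_destroy(&sc->sc_cv);
        mutex_destroy(&sc->sc_lock);
}
#endif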

/*
 * localcount_fini(lc)
 *
 *      Finalize a localcount object, releasing any memory allocated
 *      for it.  The localcount object must already have been drained.
 */
void
localcount_fini(struct localcount *lc)
{

        KASSERT(lc->lc_totalp == (void *)(uintptr_t)1);
        percpu_free(lc->lc_percpu, sizeof(int64_t));
}

/*
 * localcount_xc(cookie0, cookie1)
 *
 *      Accumulate and transfer the per-CPU reference counts to a
 *      global total, resetting the per-CPU counter to zero.  Once
 *      localcount_drain() has started, we only maintain the total
 *      count in localcount_release().
 */
static void
localcount_xc(void *cookie0, void *cookie1)
{
        struct localcount *lc = cookie0;
        kmutex_t *interlock = cookie1;
        int64_t *localp;

        mutex_enter(interlock);
        localp = percpu_getref(lc->lc_percpu);
        *lc->lc_totalp += *localp;
        *localp -= *localp;             /* ie, *localp = 0; */
        percpu_putref(lc->lc_percpu);
        mutex_exit(interlock);
}

/*
 * localcount_adjust(lc, delta)
 *
 *      Add delta -- positive or negative -- to the local CPU's count
 *      for lc.
 */
static void
localcount_adjust(struct localcount *lc, int delta)
{
        int64_t *localp;

        localp = percpu_getref(lc->lc_percpu);
        *localp += delta;
        percpu_putref(lc->lc_percpu);
}

/*
 * localcount_acquire(lc)
 *
 *      Acquire a reference to lc.
 *
 *      The reference may be held across sleeps and may be migrated
 *      from CPU to CPU, or even thread to thread -- it is only
 *      counted, not associated with a particular concrete owner.
 *
 *      Involves no interprocessor synchronization.  May be used in any
 *      context: while a lock is held, within a pserialize(9) read
 *      section, in hard interrupt context (provided other users block
 *      hard interrupts), in soft interrupt context, in thread context,
 *      &c.
 *
 *      Caller must guarantee that there is no concurrent
 *      localcount_drain.  For example, any object that may be found in
 *      a list and acquired must be removed from the list before
 *      localcount_drain.
 */
void
localcount_acquire(struct localcount *lc)
{

        KASSERT(lc->lc_totalp == NULL);
        localcount_adjust(lc, +1);
#if defined(DEBUG) && defined(LOCKDEBUG)
        if (atomic_inc_32_nv(&lc->lc_refcnt) == 0)
                panic("counter overflow");
#endif
}
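
/*
 * A sketch of the lookup pattern the comment above describes: take a
 * reference inside a pserialize(9) read section so the object cannot
 * be drained out from under us mid-lookup.  The frobdev list and
 * names are hypothetical, continuing the earlier sketches.
 */
#if 0   /* example only, not compiled */
static LIST_HEAD(, frobdev_softc) frobdev_list =
    LIST_HEAD_INITIALIZER(frobdev_list);

static struct frobdev_softc *
frobdev_lookup(int unit)
{
        struct frobdev_softc *sc;
        int s;

        s = pserialize_read_enter();
        LIST_FOREACH(sc, &frobdev_list, sc_entry) {
                if (sc->sc_unit == unit) {
                        localcount_acquire(&sc->sc_refs);
                        break;
                }
        }
        pserialize_read_exit(s);

        return sc;      /* NULL if not found */
}
#endif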

/*
 * localcount_release(lc, cv, interlock)
 *
 *      Release a reference to lc.  If there is a concurrent
 *      localcount_drain and this may be the last reference, notify
 *      localcount_drain by acquiring interlock, waking cv, and
 *      releasing interlock.  The cv and interlock passed here must be
 *      the same as are passed to localcount_drain for this lc.
 *
 *      Involves no interprocessor synchronization unless there is a
 *      concurrent localcount_drain in progress.
 */
void
localcount_release(struct localcount *lc, kcondvar_t *cv, kmutex_t *interlock)
{

        /*
         * Block xcall so that if someone begins draining after we see
         * lc->lc_totalp as null, then they won't start cv_wait until
         * after they have counted this CPU's contributions.
         *
         * Otherwise, localcount_drain may notice an extant reference
         * from this CPU and cv_wait for it, but having seen
         * lc->lc_totalp as null, this CPU will not wake
         * localcount_drain.
         */
        kpreempt_disable();

        KDASSERT(mutex_ownable(interlock));
        if (__predict_false(lc->lc_totalp != NULL)) {
                /*
                 * Slow path -- wake localcount_drain in case this is
                 * the last reference.
                 */
                mutex_enter(interlock);
                if (--*lc->lc_totalp == 0)
                        cv_broadcast(cv);
                mutex_exit(interlock);
                goto out;
        }

        localcount_adjust(lc, -1);
#if defined(DEBUG) && defined(LOCKDEBUG)
        if (atomic_dec_32_nv(&lc->lc_refcnt) == UINT_MAX)
                panic("counter underflow");
#endif
 out:   kpreempt_enable();
}
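
/*
 * Releasing the reference taken in the lookup sketch above.  The same
 * cv and interlock that the drainer passes to localcount_drain must be
 * passed here; the frobdev names remain hypothetical.
 */
#if 0   /* example only, not compiled */
static void
frobdev_put(struct frobdev_softc *sc)
{

        /* ...done using sc, possibly on another CPU or thread... */
        localcount_release(&sc->sc_refs, &sc->sc_cv, &sc->sc_lock);
}
#endif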

/*
 * localcount_debug_refcnt(lc)
 *
 *      Return the total reference count of lc.  The count is tracked
 *      only when DEBUG and LOCKDEBUG are enabled; otherwise this
 *      always returns 0.
 */
uint32_t
localcount_debug_refcnt(const struct localcount *lc)
{

#if defined(DEBUG) && defined(LOCKDEBUG)
        return lc->lc_refcnt;
#else
        return 0;
#endif
}
