FreeBSD/Linux Kernel Cross Reference
sys/contrib/ck/include/spinlock/hclh.h


/*
 * Copyright 2013-2015 Olivier Houchard
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_SPINLOCK_HCLH_H
#define CK_SPINLOCK_HCLH_H

#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>

#ifndef CK_F_SPINLOCK_HCLH
#define CK_F_SPINLOCK_HCLH
struct ck_spinlock_hclh {
        unsigned int wait;      /* Successor spins while this is true. */
        unsigned int splice;    /* Set on the local tail once it has been
                                 * spliced into the global queue. */
        int cluster_id;         /* Cluster the request was issued from. */
        struct ck_spinlock_hclh *previous; /* Predecessor in the queue. */
};
typedef struct ck_spinlock_hclh ck_spinlock_hclh_t;
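
/*
 * HCLH is a hierarchical variant of the CLH queue lock: each thread
 * enqueues a node on a per-cluster local queue and spins on its
 * predecessor's wait flag. The request that reaches the head of its
 * local queue splices the entire local queue onto the global queue
 * (marking the local tail's splice flag), so most spinning and
 * hand-off traffic stays within a single cluster.
 */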

CK_CC_INLINE static void
ck_spinlock_hclh_init(struct ck_spinlock_hclh **lock,
    struct ck_spinlock_hclh *unowned,
    int cluster_id)
{

        unowned->previous = NULL;
        unowned->wait = false;
        unowned->splice = false;
        unowned->cluster_id = cluster_id;
        *lock = unowned;
        ck_pr_barrier();
        return;
}
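
/*
 * Usage note (inferred from the code below): the global queue and every
 * per-cluster local queue must each be seeded with a dedicated unowned
 * node via ck_spinlock_hclh_init() before any acquisition is attempted;
 * the unowned node's cleared wait flag is what makes a fresh queue
 * appear unlocked.
 */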

CK_CC_INLINE static bool
ck_spinlock_hclh_locked(struct ck_spinlock_hclh **queue)
{
        struct ck_spinlock_hclh *head;
        bool r;

        head = ck_pr_load_ptr(queue);
        r = ck_pr_load_uint(&head->wait);
        ck_pr_fence_acquire();
        return r;
}

CK_CC_INLINE static void
ck_spinlock_hclh_lock(struct ck_spinlock_hclh **glob_queue,
    struct ck_spinlock_hclh **local_queue,
    struct ck_spinlock_hclh *thread)
{
        struct ck_spinlock_hclh *previous, *local_tail;

        /* Indicate to the next thread on the queue that it will have to block. */
        thread->wait = true;
        thread->splice = false;
        thread->cluster_id = (*local_queue)->cluster_id;
        /* Make sure previous->previous doesn't appear to be NULL. */
        thread->previous = *local_queue;

        /* Serialize with respect to update of local queue. */
        ck_pr_fence_store_atomic();

        /* Mark current request as last request. Save reference to previous request. */
        previous = ck_pr_fas_ptr(local_queue, thread);
        thread->previous = previous;

        /* Wait until the previous thread from the local queue is done with the lock. */
        ck_pr_fence_load();
        if (previous->previous != NULL) {
                while (ck_pr_load_uint(&previous->wait) == true &&
                        ck_pr_load_int(&previous->cluster_id) == thread->cluster_id &&
                        ck_pr_load_uint(&previous->splice) == false)
                        ck_pr_stall();

                /* We're at the head of the global queue; we're done. */
                if (ck_pr_load_int(&previous->cluster_id) == thread->cluster_id &&
                                ck_pr_load_uint(&previous->splice) == false)
                        return;
        }

        /* Now we need to splice the local queue into the global queue. */
        local_tail = ck_pr_load_ptr(local_queue);
        previous = ck_pr_fas_ptr(glob_queue, local_tail);

        ck_pr_store_uint(&local_tail->splice, true);

        /* Wait until the previous thread from the global queue is done with the lock. */
        while (ck_pr_load_uint(&previous->wait) == true)
                ck_pr_stall();

        ck_pr_fence_lock();
        return;
}

CK_CC_INLINE static void
ck_spinlock_hclh_unlock(struct ck_spinlock_hclh **thread)
{
        struct ck_spinlock_hclh *previous;

        /*
         * If there are waiters, they are spinning on the current node's wait
         * flag. The flag is cleared so that the successor may complete an
         * acquisition. If the caller is pre-empted then the predecessor field
         * may be updated by a successor's lock operation. In order to avoid
         * this, save a copy of the predecessor before setting the flag.
         */
        previous = (*thread)->previous;

        /* We have to pay this cost anyway; use it as a compiler barrier too. */
        ck_pr_fence_unlock();
        ck_pr_store_uint(&(*thread)->wait, false);

        /*
         * The predecessor is guaranteed not to be spinning on its previous
         * request, so update the caller to use the previous structure. This
         * allows the successor all the time in the world to successfully
         * read the updated wait flag.
         */
        *thread = previous;
        return;
}
#endif /* CK_F_SPINLOCK_HCLH */
#endif /* CK_SPINLOCK_HCLH_H */
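
A minimal usage sketch follows, assuming the header is pulled in via ck_spinlock.h; the names glob_queue, local_queue, N_CLUSTERS, setup, and worker are illustrative, not part of the Concurrency Kit API. Each thread owns a node, passes it by value to the lock operation, and passes its node pointer by address to the unlock operation, because unlock hands the caller the predecessor's node for reuse.

#include <ck_spinlock.h>
#include <stdlib.h>

#define N_CLUSTERS 2    /* Illustrative: e.g., one local queue per NUMA domain. */

static ck_spinlock_hclh_t *glob_queue;
static ck_spinlock_hclh_t *local_queue[N_CLUSTERS];
static ck_spinlock_hclh_t glob_unowned, local_unowned[N_CLUSTERS];

static void
setup(void)
{
        int i;

        /* Seed every queue with its own dedicated unowned node. The
         * global unowned node's cluster id is never compared, so -1
         * simply marks it as belonging to no cluster. */
        ck_spinlock_hclh_init(&glob_queue, &glob_unowned, -1);
        for (i = 0; i < N_CLUSTERS; i++)
                ck_spinlock_hclh_init(&local_queue[i], &local_unowned[i], i);
}

static void
worker(int cluster)
{
        /* Per-thread node. Node ownership migrates across unlock calls
         * (the caller adopts its predecessor's node), so nodes must
         * outlive the acquiring call frame; heap allocation, ideally
         * cache-line aligned, is the usual choice. */
        ck_spinlock_hclh_t *node = malloc(sizeof(*node));

        ck_spinlock_hclh_lock(&glob_queue, &local_queue[cluster], node);
        /* ...critical section... */
        ck_spinlock_hclh_unlock(&node);
}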

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.