/*
 * Copyright 2013-2015 Samy Al Bahra.
 * Copyright 2013 Brendon Scheinman.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_RWCOHORT_H
#define CK_RWCOHORT_H

/*
 * This is an implementation of NUMA-aware reader-writer locks as described in:
 *     Calciu, I.; Dice, D.; Lev, Y.; Luchangco, V.; Marathe, V.; and Shavit, N. 2013.
 *     NUMA-Aware Reader-Writer Locks. PPoPP '13.
 */
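
/*
 * Three flavors are provided, differing in which side they favor under
 * contention:
 *
 *     WP      - writer preference
 *     RP      - reader preference
 *     NEUTRAL - no preference between readers and writers
 */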

#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>
#include <ck_cohort.h>
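
/*
 * Usage sketch (illustrative only): a cohort type generated by
 * ck_cohort.h is paired with one of the reader-writer prototypes
 * below.  The "example" name and the *_with_context lock wrappers are
 * placeholders; see ck_cohort(3) and ck_rwcohort(3) for the exact
 * interface.  "cohort" is a pointer to an initialized
 * CK_COHORT_INSTANCE(example).
 *
 *     CK_COHORT_PROTOTYPE(example,
 *         lock_with_context, unlock_with_context, locked_with_context,
 *         lock_with_context, unlock_with_context, locked_with_context)
 *     CK_RWCOHORT_WP_PROTOTYPE(example)
 *
 *     static CK_RWCOHORT_WP_INSTANCE(example) rw_lock =
 *         CK_RWCOHORT_WP_INITIALIZER;
 *
 *     CK_RWCOHORT_WP_INIT(example, &rw_lock,
 *         CK_RWCOHORT_WP_DEFAULT_WAIT_LIMIT);
 *     CK_RWCOHORT_WP_READ_LOCK(example, &rw_lock, cohort, NULL, NULL);
 *     ...read-side critical section...
 *     CK_RWCOHORT_WP_READ_UNLOCK(example, &rw_lock, cohort, NULL, NULL);
 */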

#define CK_RWCOHORT_WP_NAME(N) ck_rwcohort_wp_##N
#define CK_RWCOHORT_WP_INSTANCE(N) struct CK_RWCOHORT_WP_NAME(N)
#define CK_RWCOHORT_WP_INIT(N, RW, WL) ck_rwcohort_wp_##N##_init(RW, WL)
#define CK_RWCOHORT_WP_READ_LOCK(N, RW, C, GC, LC) \
	ck_rwcohort_wp_##N##_read_lock(RW, C, GC, LC)
#define CK_RWCOHORT_WP_READ_UNLOCK(N, RW, C, GC, LC) \
	ck_rwcohort_wp_##N##_read_unlock(RW)
#define CK_RWCOHORT_WP_WRITE_LOCK(N, RW, C, GC, LC) \
	ck_rwcohort_wp_##N##_write_lock(RW, C, GC, LC)
#define CK_RWCOHORT_WP_WRITE_UNLOCK(N, RW, C, GC, LC) \
	ck_rwcohort_wp_##N##_write_unlock(RW, C, GC, LC)
#define CK_RWCOHORT_WP_DEFAULT_WAIT_LIMIT 1000
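
/*
 * Writer-preference variant: a writer acquires the underlying cohort
 * lock and then waits for active readers to drain.  Readers announce
 * themselves through read_counter and back off while a writer holds
 * the cohort lock; after wait_limit spins they raise write_barrier to
 * hold off new writers and avoid reader starvation.
 */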
#define CK_RWCOHORT_WP_PROTOTYPE(N) \
	CK_RWCOHORT_WP_INSTANCE(N) { \
		unsigned int read_counter; \
		unsigned int write_barrier; \
		unsigned int wait_limit; \
	}; \
	CK_CC_INLINE static void \
	ck_rwcohort_wp_##N##_init(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
	    unsigned int wait_limit) \
	{ \
		\
		rw_cohort->read_counter = 0; \
		rw_cohort->write_barrier = 0; \
		rw_cohort->wait_limit = wait_limit; \
		ck_pr_barrier(); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_wp_##N##_write_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
		\
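		/* \
		 * Wait for any reader-raised write barrier to clear \
		 * before competing for the cohort lock, then drain \
		 * the readers that are still active. \
		 */ \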
		while (ck_pr_load_uint(&rw_cohort->write_barrier) > 0) \
			ck_pr_stall(); \
		\
		CK_COHORT_LOCK(N, cohort, global_context, local_context); \
		\
		while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) \
			ck_pr_stall(); \
		\
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_wp_##N##_write_unlock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
		\
		(void)rw_cohort; \
		CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_wp_##N##_read_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
		unsigned int wait_count = 0; \
		bool raised = false; \
		\
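		/* \
		 * Speculatively announce this reader, then withdraw \
		 * and spin if a writer already holds the cohort lock. \
		 * After wait_limit spins, raise write_barrier so that \
		 * new writers are held off and the reader eventually \
		 * gets through. \
		 */ \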
		for (;;) { \
			ck_pr_inc_uint(&rw_cohort->read_counter); \
			ck_pr_fence_atomic_load(); \
			if (CK_COHORT_LOCKED(N, cohort, global_context, \
			    local_context) == false) \
				break; \
			\
			ck_pr_dec_uint(&rw_cohort->read_counter); \
			while (CK_COHORT_LOCKED(N, cohort, global_context, \
			    local_context) == true) { \
				ck_pr_stall(); \
				if (++wait_count > rw_cohort->wait_limit && \
				    raised == false) { \
					ck_pr_inc_uint(&rw_cohort->write_barrier); \
					raised = true; \
				} \
			} \
		} \
		\
		if (raised == true) \
			ck_pr_dec_uint(&rw_cohort->write_barrier); \
		\
		ck_pr_fence_load(); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_wp_##N##_read_unlock(CK_RWCOHORT_WP_INSTANCE(N) *cohort) \
	{ \
		\
		ck_pr_fence_load_atomic(); \
		ck_pr_dec_uint(&cohort->read_counter); \
		return; \
	}

#define CK_RWCOHORT_WP_INITIALIZER { \
	.read_counter = 0, \
	.write_barrier = 0, \
	.wait_limit = 0 \
}

#define CK_RWCOHORT_RP_NAME(N) ck_rwcohort_rp_##N
#define CK_RWCOHORT_RP_INSTANCE(N) struct CK_RWCOHORT_RP_NAME(N)
#define CK_RWCOHORT_RP_INIT(N, RW, WL) ck_rwcohort_rp_##N##_init(RW, WL)
#define CK_RWCOHORT_RP_READ_LOCK(N, RW, C, GC, LC) \
	ck_rwcohort_rp_##N##_read_lock(RW, C, GC, LC)
#define CK_RWCOHORT_RP_READ_UNLOCK(N, RW, C, GC, LC) \
	ck_rwcohort_rp_##N##_read_unlock(RW)
#define CK_RWCOHORT_RP_WRITE_LOCK(N, RW, C, GC, LC) \
	ck_rwcohort_rp_##N##_write_lock(RW, C, GC, LC)
#define CK_RWCOHORT_RP_WRITE_UNLOCK(N, RW, C, GC, LC) \
	ck_rwcohort_rp_##N##_write_unlock(RW, C, GC, LC)
#define CK_RWCOHORT_RP_DEFAULT_WAIT_LIMIT 1000
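
/*
 * Reader-preference variant: a writer takes the cohort lock and, if
 * readers are active, releases it again and spins so readers are not
 * blocked while they drain; after wait_limit spins it raises
 * read_barrier to hold off new readers and avoid writer starvation.
 * Readers only wait on read_barrier and on an in-progress writer.
 */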
#define CK_RWCOHORT_RP_PROTOTYPE(N) \
	CK_RWCOHORT_RP_INSTANCE(N) { \
		unsigned int read_counter; \
		unsigned int read_barrier; \
		unsigned int wait_limit; \
	}; \
	CK_CC_INLINE static void \
	ck_rwcohort_rp_##N##_init(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
	    unsigned int wait_limit) \
	{ \
		\
		rw_cohort->read_counter = 0; \
		rw_cohort->read_barrier = 0; \
		rw_cohort->wait_limit = wait_limit; \
		ck_pr_barrier(); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_rp_##N##_write_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
		unsigned int wait_count = 0; \
		bool raised = false; \
		\
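		/* \
		 * Acquire the cohort lock, then verify that no readers \
		 * are active; otherwise release it and spin, raising \
		 * read_barrier after wait_limit iterations to throttle \
		 * incoming readers. \
		 */ \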
		for (;;) { \
			CK_COHORT_LOCK(N, cohort, global_context, local_context); \
			if (ck_pr_load_uint(&rw_cohort->read_counter) == 0) \
				break; \
			\
			CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
			while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \
				ck_pr_stall(); \
				if (++wait_count > rw_cohort->wait_limit && \
				    raised == false) { \
					ck_pr_inc_uint(&rw_cohort->read_barrier); \
					raised = true; \
				} \
			} \
		} \
		\
		if (raised == true) \
			ck_pr_dec_uint(&rw_cohort->read_barrier); \
		\
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_rp_##N##_write_unlock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, void *local_context) \
	{ \
		\
		(void)rw_cohort; \
		CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_rp_##N##_read_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
		\
		while (ck_pr_load_uint(&rw_cohort->read_barrier) > 0) \
			ck_pr_stall(); \
		\
		ck_pr_inc_uint(&rw_cohort->read_counter); \
		ck_pr_fence_atomic_load(); \
		\
		while (CK_COHORT_LOCKED(N, cohort, global_context, \
		    local_context) == true) \
			ck_pr_stall(); \
		\
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_rp_##N##_read_unlock(CK_RWCOHORT_RP_INSTANCE(N) *cohort) \
	{ \
		\
		ck_pr_fence_load_atomic(); \
		ck_pr_dec_uint(&cohort->read_counter); \
		return; \
	}

#define CK_RWCOHORT_RP_INITIALIZER { \
	.read_counter = 0, \
	.read_barrier = 0, \
	.wait_limit = 0 \
}

#define CK_RWCOHORT_NEUTRAL_NAME(N) ck_rwcohort_neutral_##N
#define CK_RWCOHORT_NEUTRAL_INSTANCE(N) struct CK_RWCOHORT_NEUTRAL_NAME(N)
#define CK_RWCOHORT_NEUTRAL_INIT(N, RW) ck_rwcohort_neutral_##N##_init(RW)
#define CK_RWCOHORT_NEUTRAL_READ_LOCK(N, RW, C, GC, LC) \
	ck_rwcohort_neutral_##N##_read_lock(RW, C, GC, LC)
#define CK_RWCOHORT_NEUTRAL_READ_UNLOCK(N, RW, C, GC, LC) \
	ck_rwcohort_neutral_##N##_read_unlock(RW)
#define CK_RWCOHORT_NEUTRAL_WRITE_LOCK(N, RW, C, GC, LC) \
	ck_rwcohort_neutral_##N##_write_lock(RW, C, GC, LC)
#define CK_RWCOHORT_NEUTRAL_WRITE_UNLOCK(N, RW, C, GC, LC) \
	ck_rwcohort_neutral_##N##_write_unlock(RW, C, GC, LC)
#define CK_RWCOHORT_NEUTRAL_DEFAULT_WAIT_LIMIT 1000
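
/*
 * Neutral variant: readers briefly acquire the cohort lock to register
 * themselves in read_counter, so neither side is preferred; the cohort
 * lock itself serializes readers against writers.
 */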
#define CK_RWCOHORT_NEUTRAL_PROTOTYPE(N) \
	CK_RWCOHORT_NEUTRAL_INSTANCE(N) { \
		unsigned int read_counter; \
	}; \
	CK_CC_INLINE static void \
	ck_rwcohort_neutral_##N##_init(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort) \
	{ \
		\
		rw_cohort->read_counter = 0; \
		ck_pr_barrier(); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_neutral_##N##_write_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
		\
		CK_COHORT_LOCK(N, cohort, global_context, local_context); \
		while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \
			ck_pr_stall(); \
		} \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_neutral_##N##_write_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, void *local_context) \
	{ \
		\
		(void)rw_cohort; \
		CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_neutral_##N##_read_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
		\
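		/* \
		 * Take the cohort lock just long enough to register \
		 * as a reader; an in-progress writer excludes us for \
		 * the duration of registration. \
		 */ \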
		CK_COHORT_LOCK(N, cohort, global_context, local_context); \
		ck_pr_inc_uint(&rw_cohort->read_counter); \
		CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_neutral_##N##_read_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *cohort) \
	{ \
		\
		ck_pr_fence_load_atomic(); \
		ck_pr_dec_uint(&cohort->read_counter); \
		return; \
	}

#define CK_RWCOHORT_NEUTRAL_INITIALIZER { \
	.read_counter = 0, \
}

#endif /* CK_RWCOHORT_H */