1 /*-
2 * Copyright (c) 2013 Andrew Turner <andrew@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD$
27 */
28
29 #ifndef _MACHINE_ATOMIC_H_
30 #define _MACHINE_ATOMIC_H_
31
/* Instruction synchronization barrier: flush the pipeline on this CPU. */
#define	isb()		__asm __volatile("isb" : : : "memory")
33
34 /*
35 * Options for DMB and DSB:
36 * oshld Outer Shareable, load
37 * oshst Outer Shareable, store
38 * osh Outer Shareable, all
39 * nshld Non-shareable, load
40 * nshst Non-shareable, store
41 * nsh Non-shareable, all
42 * ishld Inner Shareable, load
43 * ishst Inner Shareable, store
44 * ish Inner Shareable, all
45 * ld Full system, load
46 * st Full system, store
47 * sy Full system, all
48 */
/*
 * Data synchronization / data memory barriers taking one of the options
 * from the table above.  The "memory" clobber also stops the compiler
 * from moving memory accesses across the barrier.
 */
#define	dsb(opt)	__asm __volatile("dsb " __STRING(opt) : : : "memory")
#define	dmb(opt)	__asm __volatile("dmb " __STRING(opt) : : : "memory")

#define	mb()	dmb(sy)	/* Full system memory barrier all */
#define	wmb()	dmb(st)	/* Full system memory barrier store */
#define	rmb()	dmb(ld)	/* Full system memory barrier load */
55
56 #if defined(SAN_NEEDS_INTERCEPTORS) && !defined(SAN_RUNTIME)
57 #include <sys/atomic_san.h>
58 #else
59
60 #include <sys/atomic_common.h>
61
#ifdef _KERNEL
extern bool lse_supported;

/*
 * _ATOMIC_LSE_SUPPORTED selects between the LL/SC (load/store-exclusive)
 * and LSE (ARMv8.1 atomics) implementations generated below.  In the
 * kernel it is a compile-time constant when built with LSE_ATOMICS and
 * otherwise the run-time probed lse_supported flag; outside the kernel
 * the LL/SC path is always taken.
 */
#ifdef LSE_ATOMICS
#define	_ATOMIC_LSE_SUPPORTED	1
#else
#define	_ATOMIC_LSE_SUPPORTED	lse_supported
#endif
#else
#define	_ATOMIC_LSE_SUPPORTED	0
#endif
73
/*
 * Expands to the prototype of atomic_<op>_[<bar>]<t>[<flav>], where t is
 * the width in bits (8/16/32/64), bar is "", "acq_" or "rel_", and flav
 * is "", "_llsc" or "_lse".
 */
#define	_ATOMIC_OP_PROTO(t, op, bar, flav)				\
static __inline void							\
atomic_##op##_##bar##t##flav(volatile uint##t##_t *p, uint##t##_t val)
77
/*
 * Generate the three flavours of one read-modify-write operation:
 *
 *  - _llsc: loop on load/store-exclusive (ld{a}xr / st{l}xr), retrying
 *    (cbnz on the store-exclusive status in res) until the store wins;
 *  - _lse: a single ld<op>[a][l][size] atomic instruction, the loaded
 *    old value landing in a discarded scratch register;
 *  - plain: run-time dispatch on _ATOMIC_LSE_SUPPORTED.
 *
 * Parameters: t = width in bits, w = register prefix ("w" or "" for x),
 * s = size suffix ("b", "h" or ""), a/l = acquire/release suffixes, and
 * pre = an optional statement executed first (used by subtract to negate
 * val).
 */
#define	_ATOMIC_OP_IMPL(t, w, s, op, llsc_asm_op, lse_asm_op, pre, bar, a, l) \
_ATOMIC_OP_PROTO(t, op, bar, _llsc)					\
{									\
	uint##t##_t tmp;						\
	int res;							\
									\
	pre;								\
	__asm __volatile(						\
	    "1: ld"#a"xr"#s" %"#w"0, [%2]\n"				\
	    " "#llsc_asm_op" %"#w"0, %"#w"0, %"#w"3\n"			\
	    " st"#l"xr"#s" %w1, %"#w"0, [%2]\n"				\
	    " cbnz %w1, 1b\n"						\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
}									\
									\
_ATOMIC_OP_PROTO(t, op, bar, _lse)					\
{									\
	uint##t##_t tmp;						\
									\
	pre;								\
	__asm __volatile(						\
	    ".arch_extension lse\n"					\
	    "ld"#lse_asm_op#a#l#s" %"#w"2, %"#w"0, [%1]\n"		\
	    ".arch_extension nolse\n"					\
	    : "=r" (tmp)						\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
}									\
									\
_ATOMIC_OP_PROTO(t, op, bar, )						\
{									\
	if (_ATOMIC_LSE_SUPPORTED)					\
		atomic_##op##_##bar##t##_lse(p, val);			\
	else								\
		atomic_##op##_##bar##t##_llsc(p, val);			\
}
118
/* Instantiate one op for all four operand widths. */
#define	__ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre, bar, a, l)	\
	_ATOMIC_OP_IMPL(8, w, b, op, llsc_asm_op, lse_asm_op, pre,	\
	    bar, a, l)							\
	_ATOMIC_OP_IMPL(16, w, h, op, llsc_asm_op, lse_asm_op, pre,	\
	    bar, a, l)							\
	_ATOMIC_OP_IMPL(32, w,  , op, llsc_asm_op, lse_asm_op, pre,	\
	    bar, a, l)							\
	_ATOMIC_OP_IMPL(64,  ,  , op, llsc_asm_op, lse_asm_op, pre,	\
	    bar, a, l)

/* ... and for the relaxed, acquire and release barrier variants. */
#define	_ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre)			\
	__ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre,     ,  , )	\
	__ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre, acq_, a, )	\
	__ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre, rel_,  , l)

_ATOMIC_OP(add,      add, add, )
_ATOMIC_OP(clear,    bic, clr, )
_ATOMIC_OP(set,      orr, set, )
/* There is no atomic subtract instruction; add the negated value. */
_ATOMIC_OP(subtract, add, add, val = -val)
138
/*
 * cmpset: if *p == cmpval, store newval; returns non-zero on success.
 * fcmpset: like cmpset, but on failure also writes the value actually
 * observed in *p back through the cmpval pointer so the caller can
 * retry without re-reading.
 */
#define	_ATOMIC_CMPSET_PROTO(t, bar, flav)				\
static __inline int							\
atomic_cmpset_##bar##t##flav(volatile uint##t##_t *p,			\
    uint##t##_t cmpval, uint##t##_t newval)

#define	_ATOMIC_FCMPSET_PROTO(t, bar, flav)				\
static __inline int							\
atomic_fcmpset_##bar##t##flav(volatile uint##t##_t *p,			\
    uint##t##_t *cmpval, uint##t##_t newval)
148
/*
 * Generate cmpset/fcmpset for one width.
 *
 * LL/SC cmpset: res is preset to 1 so that a failed comparison (b.ne 2f)
 * reports failure via the final "return (!res)"; a successful
 * store-exclusive writes 0 into res.  The loop retries only on
 * store-exclusive failure, not on compare failure.
 *
 * LSE cmpset: a single cas instruction; the old value comes back in the
 * cmpval register, which is compared against the saved expected value
 * (oldval) and turned into a boolean with cset.
 *
 * LL/SC fcmpset makes a single attempt (no retry loop, per the fcmpset
 * contract); the loaded value is returned via *cmpval either way.
 */
#define	_ATOMIC_CMPSET_IMPL(t, w, s, bar, a, l)				\
_ATOMIC_CMPSET_PROTO(t, bar, _llsc)					\
{									\
	uint##t##_t tmp;						\
	int res;							\
									\
	__asm __volatile(						\
	    "1: mov %w1, #1\n"						\
	    " ld"#a"xr"#s" %"#w"0, [%2]\n"				\
	    " cmp %"#w"0, %"#w"3\n"					\
	    " b.ne 2f\n"						\
	    " st"#l"xr"#s" %w1, %"#w"4, [%2]\n"				\
	    " cbnz %w1, 1b\n"						\
	    "2:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
									\
	return (!res);							\
}									\
									\
_ATOMIC_CMPSET_PROTO(t, bar, _lse)					\
{									\
	uint##t##_t oldval;						\
	int res;							\
									\
	oldval = cmpval;						\
	__asm __volatile(						\
	    ".arch_extension lse\n"					\
	    "cas"#a#l#s" %"#w"1, %"#w"4, [%3]\n"			\
	    "cmp %"#w"1, %"#w"2\n"					\
	    "cset %w0, eq\n"						\
	    ".arch_extension nolse\n"					\
	    : "=r" (res), "+&r" (cmpval)				\
	    : "r" (oldval), "r" (p), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
									\
	return (res);							\
}									\
									\
_ATOMIC_CMPSET_PROTO(t, bar, )						\
{									\
	if (_ATOMIC_LSE_SUPPORTED)					\
		return (atomic_cmpset_##bar##t##_lse(p, cmpval,		\
		    newval));						\
	else								\
		return (atomic_cmpset_##bar##t##_llsc(p, cmpval,	\
		    newval));						\
}									\
									\
_ATOMIC_FCMPSET_PROTO(t, bar, _llsc)					\
{									\
	uint##t##_t _cmpval, tmp;					\
	int res;							\
									\
	_cmpval = *cmpval;						\
	__asm __volatile(						\
	    " mov %w1, #1\n"						\
	    " ld"#a"xr"#s" %"#w"0, [%2]\n"				\
	    " cmp %"#w"0, %"#w"3\n"					\
	    " b.ne 1f\n"						\
	    " st"#l"xr"#s" %w1, %"#w"4, [%2]\n"				\
	    "1:"							\
	    : "=&r"(tmp), "=&r"(res)					\
	    : "r" (p), "r" (_cmpval), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
	*cmpval = tmp;							\
									\
	return (!res);							\
}									\
									\
_ATOMIC_FCMPSET_PROTO(t, bar, _lse)					\
{									\
	uint##t##_t _cmpval, tmp;					\
	int res;							\
									\
	_cmpval = tmp = *cmpval;					\
	__asm __volatile(						\
	    ".arch_extension lse\n"					\
	    "cas"#a#l#s" %"#w"1, %"#w"4, [%3]\n"			\
	    "cmp %"#w"1, %"#w"2\n"					\
	    "cset %w0, eq\n"						\
	    ".arch_extension nolse\n"					\
	    : "=r" (res), "+&r" (tmp)					\
	    : "r" (_cmpval), "r" (p), "r" (newval)			\
	    : "cc", "memory"						\
	);								\
	*cmpval = tmp;							\
									\
	return (res);							\
}									\
									\
_ATOMIC_FCMPSET_PROTO(t, bar, )						\
{									\
	if (_ATOMIC_LSE_SUPPORTED)					\
		return (atomic_fcmpset_##bar##t##_lse(p, cmpval,	\
		    newval));						\
	else								\
		return (atomic_fcmpset_##bar##t##_llsc(p, cmpval,	\
		    newval));						\
}
253
/* Instantiate cmpset/fcmpset for every width and barrier variant. */
#define	_ATOMIC_CMPSET(bar, a, l)					\
	_ATOMIC_CMPSET_IMPL(8,  w, b, bar, a, l)			\
	_ATOMIC_CMPSET_IMPL(16, w, h, bar, a, l)			\
	_ATOMIC_CMPSET_IMPL(32, w,  , bar, a, l)			\
	_ATOMIC_CMPSET_IMPL(64,  ,  , bar, a, l)

/*
 * Self-referential defines advertising that the 8- and 16-bit variants
 * exist; presumably tested with #ifdef by MI atomic headers -- confirm
 * against <sys/atomic_common.h> and its consumers.
 */
#define	atomic_cmpset_8		atomic_cmpset_8
#define	atomic_fcmpset_8	atomic_fcmpset_8
#define	atomic_cmpset_16	atomic_cmpset_16
#define	atomic_fcmpset_16	atomic_fcmpset_16

_ATOMIC_CMPSET(    ,  , )
_ATOMIC_CMPSET(acq_, a, )
_ATOMIC_CMPSET(rel_,  , l)
268
/*
 * fetchadd: atomically add val to *p and return the value *p held
 * before the addition.  Only 32- and 64-bit variants are generated,
 * and only with relaxed ordering.
 */
#define	_ATOMIC_FETCHADD_PROTO(t, flav)					\
static __inline uint##t##_t						\
atomic_fetchadd_##t##flav(volatile uint##t##_t *p, uint##t##_t val)

#define	_ATOMIC_FETCHADD_IMPL(t, w)					\
_ATOMIC_FETCHADD_PROTO(t, _llsc)					\
{									\
	uint##t##_t ret, tmp;						\
	int res;							\
									\
	__asm __volatile(						\
	    "1: ldxr %"#w"2, [%3]\n"					\
	    " add %"#w"0, %"#w"2, %"#w"4\n"				\
	    " stxr %w1, %"#w"0, [%3]\n"					\
	    " cbnz %w1, 1b\n"						\
	    : "=&r" (tmp), "=&r" (res), "=&r" (ret)			\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
									\
	return (ret);							\
}									\
									\
_ATOMIC_FETCHADD_PROTO(t, _lse)						\
{									\
	uint##t##_t ret;						\
									\
	__asm __volatile(						\
	    ".arch_extension lse\n"					\
	    "ldadd %"#w"2, %"#w"0, [%1]\n"				\
	    ".arch_extension nolse\n"					\
	    : "=r" (ret)						\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
									\
	return (ret);							\
}									\
									\
_ATOMIC_FETCHADD_PROTO(t, )						\
{									\
	if (_ATOMIC_LSE_SUPPORTED)					\
		return (atomic_fetchadd_##t##_lse(p, val));		\
	else								\
		return (atomic_fetchadd_##t##_llsc(p, val));		\
}

_ATOMIC_FETCHADD_IMPL(32, w)
_ATOMIC_FETCHADD_IMPL(64, )
318
/*
 * swap: atomically store val into *p and return the previous value.
 * readandclear: atomically zero *p (storing the wzr/xzr zero register
 * in the LL/SC case) and return the previous value; the LSE flavour is
 * simply a swap of 0.  Only 32- and 64-bit variants are generated.
 */
#define	_ATOMIC_SWAP_PROTO(t, flav)					\
static __inline uint##t##_t						\
atomic_swap_##t##flav(volatile uint##t##_t *p, uint##t##_t val)

#define	_ATOMIC_READANDCLEAR_PROTO(t, flav)				\
static __inline uint##t##_t						\
atomic_readandclear_##t##flav(volatile uint##t##_t *p)

#define	_ATOMIC_SWAP_IMPL(t, w, zreg)					\
_ATOMIC_SWAP_PROTO(t, _llsc)						\
{									\
	uint##t##_t ret;						\
	int res;							\
									\
	__asm __volatile(						\
	    "1: ldxr %"#w"1, [%2]\n"					\
	    " stxr %w0, %"#w"3, [%2]\n"					\
	    " cbnz %w0, 1b\n"						\
	    : "=&r" (res), "=&r" (ret)					\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
									\
	return (ret);							\
}									\
									\
_ATOMIC_SWAP_PROTO(t, _lse)						\
{									\
	uint##t##_t ret;						\
									\
	__asm __volatile(						\
	    ".arch_extension lse\n"					\
	    "swp %"#w"2, %"#w"0, [%1]\n"				\
	    ".arch_extension nolse\n"					\
	    : "=r" (ret)						\
	    : "r" (p), "r" (val)					\
	    : "memory"							\
	);								\
									\
	return (ret);							\
}									\
									\
_ATOMIC_SWAP_PROTO(t, )							\
{									\
	if (_ATOMIC_LSE_SUPPORTED)					\
		return (atomic_swap_##t##_lse(p, val));			\
	else								\
		return (atomic_swap_##t##_llsc(p, val));		\
}									\
									\
_ATOMIC_READANDCLEAR_PROTO(t, _llsc)					\
{									\
	uint##t##_t ret;						\
	int res;							\
									\
	__asm __volatile(						\
	    "1: ldxr %"#w"1, [%2]\n"					\
	    " stxr %w0, "#zreg", [%2]\n"				\
	    " cbnz %w0, 1b\n"						\
	    : "=&r" (res), "=&r" (ret)					\
	    : "r" (p)							\
	    : "memory"							\
	);								\
									\
	return (ret);							\
}									\
									\
_ATOMIC_READANDCLEAR_PROTO(t, _lse)					\
{									\
	return (atomic_swap_##t##_lse(p, 0));				\
}									\
									\
_ATOMIC_READANDCLEAR_PROTO(t, )						\
{									\
	if (_ATOMIC_LSE_SUPPORTED)					\
		return (atomic_readandclear_##t##_lse(p));		\
	else								\
		return (atomic_readandclear_##t##_llsc(p));		\
}

_ATOMIC_SWAP_IMPL(32, w, wzr)
_ATOMIC_SWAP_IMPL(64,  , xzr)
401
/*
 * testandset/testandclear: atomically set or clear bit (val mod t) of
 * *p and return non-zero iff that bit was previously set.  The bit
 * index is masked with (t - 1), so out-of-range indices wrap.  Only
 * 32/64-bit widths and only the relaxed and acquire barrier variants
 * are generated.
 */
#define	_ATOMIC_TEST_OP_PROTO(t, op, bar, flav)				\
static __inline int							\
atomic_testand##op##_##bar##t##flav(volatile uint##t##_t *p, u_int val)

#define	_ATOMIC_TEST_OP_IMPL(t, w, op, llsc_asm_op, lse_asm_op, bar, a)	\
_ATOMIC_TEST_OP_PROTO(t, op, bar, _llsc)				\
{									\
	uint##t##_t mask, old, tmp;					\
	int res;							\
									\
	mask = ((uint##t##_t)1) << (val & (t - 1));			\
	__asm __volatile(						\
	    "1: ld"#a"xr %"#w"2, [%3]\n"				\
	    " "#llsc_asm_op" %"#w"0, %"#w"2, %"#w"4\n"			\
	    " stxr %w1, %"#w"0, [%3]\n"					\
	    " cbnz %w1, 1b\n"						\
	    : "=&r" (tmp), "=&r" (res), "=&r" (old)			\
	    : "r" (p), "r" (mask)					\
	    : "memory"							\
	);								\
									\
	return ((old & mask) != 0);					\
}									\
									\
_ATOMIC_TEST_OP_PROTO(t, op, bar, _lse)					\
{									\
	uint##t##_t mask, old;						\
									\
	mask = ((uint##t##_t)1) << (val & (t - 1));			\
	__asm __volatile(						\
	    ".arch_extension lse\n"					\
	    "ld"#lse_asm_op#a" %"#w"2, %"#w"0, [%1]\n"			\
	    ".arch_extension nolse\n"					\
	    : "=r" (old)						\
	    : "r" (p), "r" (mask)					\
	    : "memory"							\
	);								\
									\
	return ((old & mask) != 0);					\
}									\
									\
_ATOMIC_TEST_OP_PROTO(t, op, bar, )					\
{									\
	if (_ATOMIC_LSE_SUPPORTED)					\
		return (atomic_testand##op##_##bar##t##_lse(p, val));	\
	else								\
		return (atomic_testand##op##_##bar##t##_llsc(p, val));	\
}

#define	_ATOMIC_TEST_OP(op, llsc_asm_op, lse_asm_op)			\
	_ATOMIC_TEST_OP_IMPL(32, w, op, llsc_asm_op, lse_asm_op,     ,  ) \
	_ATOMIC_TEST_OP_IMPL(32, w, op, llsc_asm_op, lse_asm_op, acq_, a) \
	_ATOMIC_TEST_OP_IMPL(64,  , op, llsc_asm_op, lse_asm_op,     ,  ) \
	_ATOMIC_TEST_OP_IMPL(64,  , op, llsc_asm_op, lse_asm_op, acq_, a)

_ATOMIC_TEST_OP(clear, bic, clr)
_ATOMIC_TEST_OP(set,   orr, set)
459
/*
 * Acquire loads, implemented with the ldar{b,h} load-acquire
 * instructions rather than a plain load plus barrier.
 */
#define	_ATOMIC_LOAD_ACQ_IMPL(t, w, s)					\
static __inline uint##t##_t						\
atomic_load_acq_##t(volatile uint##t##_t *p)				\
{									\
	uint##t##_t ret;						\
									\
	__asm __volatile(						\
	    "ldar"#s" %"#w"0, [%1]\n"					\
	    : "=&r" (ret)						\
	    : "r" (p)							\
	    : "memory");						\
									\
	return (ret);							\
}

/* Advertise the 8/16-bit acquire loads to MI code. */
#define	atomic_load_acq_8	atomic_load_acq_8
#define	atomic_load_acq_16	atomic_load_acq_16
_ATOMIC_LOAD_ACQ_IMPL(8,  w, b)
_ATOMIC_LOAD_ACQ_IMPL(16, w, h)
_ATOMIC_LOAD_ACQ_IMPL(32, w,  )
_ATOMIC_LOAD_ACQ_IMPL(64,  ,  )
481
/*
 * Release stores, implemented with the stlr{b,h} store-release
 * instructions.
 */
#define	_ATOMIC_STORE_REL_IMPL(t, w, s)					\
static __inline void							\
atomic_store_rel_##t(volatile uint##t##_t *p, uint##t##_t val)		\
{									\
	__asm __volatile(						\
	    "stlr"#s" %"#w"0, [%1]\n"					\
	    :								\
	    : "r" (val), "r" (p)					\
	    : "memory");						\
}

_ATOMIC_STORE_REL_IMPL(8,  w, b)
_ATOMIC_STORE_REL_IMPL(16, w, h)
_ATOMIC_STORE_REL_IMPL(32, w,  )
_ATOMIC_STORE_REL_IMPL(64,  ,  )
497
/*
 * Map the type-named API (char == 8 bits) onto the fixed-width
 * implementations.  NOTE(review): atomic_fetchadd_8, atomic_swap_8,
 * atomic_readandclear_8 and the 8-bit testand* targets are not
 * generated in this file (those generators cover 32/64 bits only);
 * these aliases are only usable if the targets are provided elsewhere
 * -- confirm.
 */
#define	atomic_add_char			atomic_add_8
#define	atomic_fcmpset_char		atomic_fcmpset_8
#define	atomic_clear_char		atomic_clear_8
#define	atomic_cmpset_char		atomic_cmpset_8
#define	atomic_fetchadd_char		atomic_fetchadd_8
#define	atomic_readandclear_char	atomic_readandclear_8
#define	atomic_set_char			atomic_set_8
#define	atomic_swap_char		atomic_swap_8
#define	atomic_subtract_char		atomic_subtract_8
#define	atomic_testandclear_char	atomic_testandclear_8
#define	atomic_testandset_char		atomic_testandset_8

#define	atomic_add_acq_char		atomic_add_acq_8
#define	atomic_fcmpset_acq_char		atomic_fcmpset_acq_8
#define	atomic_clear_acq_char		atomic_clear_acq_8
#define	atomic_cmpset_acq_char		atomic_cmpset_acq_8
#define	atomic_load_acq_char		atomic_load_acq_8
#define	atomic_set_acq_char		atomic_set_acq_8
#define	atomic_subtract_acq_char	atomic_subtract_acq_8
#define	atomic_testandset_acq_char	atomic_testandset_acq_8

#define	atomic_add_rel_char		atomic_add_rel_8
#define	atomic_fcmpset_rel_char		atomic_fcmpset_rel_8
#define	atomic_clear_rel_char		atomic_clear_rel_8
#define	atomic_cmpset_rel_char		atomic_cmpset_rel_8
#define	atomic_set_rel_char		atomic_set_rel_8
#define	atomic_subtract_rel_char	atomic_subtract_rel_8
#define	atomic_store_rel_char		atomic_store_rel_8
526
/*
 * short == 16 bits.  NOTE(review): as with the 8-bit aliases, the
 * 16-bit fetchadd/swap/readandclear/testand* targets are not generated
 * in this file -- confirm they are provided elsewhere before use.
 */
#define	atomic_add_short		atomic_add_16
#define	atomic_fcmpset_short		atomic_fcmpset_16
#define	atomic_clear_short		atomic_clear_16
#define	atomic_cmpset_short		atomic_cmpset_16
#define	atomic_fetchadd_short		atomic_fetchadd_16
#define	atomic_readandclear_short	atomic_readandclear_16
#define	atomic_set_short		atomic_set_16
#define	atomic_swap_short		atomic_swap_16
#define	atomic_subtract_short		atomic_subtract_16
#define	atomic_testandclear_short	atomic_testandclear_16
#define	atomic_testandset_short		atomic_testandset_16

#define	atomic_add_acq_short		atomic_add_acq_16
#define	atomic_fcmpset_acq_short	atomic_fcmpset_acq_16
#define	atomic_clear_acq_short		atomic_clear_acq_16
#define	atomic_cmpset_acq_short		atomic_cmpset_acq_16
#define	atomic_load_acq_short		atomic_load_acq_16
#define	atomic_set_acq_short		atomic_set_acq_16
#define	atomic_subtract_acq_short	atomic_subtract_acq_16
#define	atomic_testandset_acq_short	atomic_testandset_acq_16

#define	atomic_add_rel_short		atomic_add_rel_16
#define	atomic_fcmpset_rel_short	atomic_fcmpset_rel_16
#define	atomic_clear_rel_short		atomic_clear_rel_16
#define	atomic_cmpset_rel_short		atomic_cmpset_rel_16
#define	atomic_set_rel_short		atomic_set_rel_16
#define	atomic_subtract_rel_short	atomic_subtract_rel_16
#define	atomic_store_rel_short		atomic_store_rel_16
555
/* int == 32 bits on arm64. */
#define	atomic_add_int			atomic_add_32
#define	atomic_fcmpset_int		atomic_fcmpset_32
#define	atomic_clear_int		atomic_clear_32
#define	atomic_cmpset_int		atomic_cmpset_32
#define	atomic_fetchadd_int		atomic_fetchadd_32
#define	atomic_readandclear_int		atomic_readandclear_32
#define	atomic_set_int			atomic_set_32
#define	atomic_swap_int			atomic_swap_32
#define	atomic_subtract_int		atomic_subtract_32
#define	atomic_testandclear_int		atomic_testandclear_32
#define	atomic_testandset_int		atomic_testandset_32

#define	atomic_add_acq_int		atomic_add_acq_32
#define	atomic_fcmpset_acq_int		atomic_fcmpset_acq_32
#define	atomic_clear_acq_int		atomic_clear_acq_32
#define	atomic_cmpset_acq_int		atomic_cmpset_acq_32
#define	atomic_load_acq_int		atomic_load_acq_32
#define	atomic_set_acq_int		atomic_set_acq_32
#define	atomic_subtract_acq_int		atomic_subtract_acq_32
#define	atomic_testandset_acq_int	atomic_testandset_acq_32

#define	atomic_add_rel_int		atomic_add_rel_32
#define	atomic_fcmpset_rel_int		atomic_fcmpset_rel_32
#define	atomic_clear_rel_int		atomic_clear_rel_32
#define	atomic_cmpset_rel_int		atomic_cmpset_rel_32
#define	atomic_set_rel_int		atomic_set_rel_32
#define	atomic_subtract_rel_int		atomic_subtract_rel_32
#define	atomic_store_rel_int		atomic_store_rel_32
584
/* long == 64 bits (LP64). */
#define	atomic_add_long			atomic_add_64
#define	atomic_fcmpset_long		atomic_fcmpset_64
#define	atomic_clear_long		atomic_clear_64
#define	atomic_cmpset_long		atomic_cmpset_64
#define	atomic_fetchadd_long		atomic_fetchadd_64
#define	atomic_readandclear_long	atomic_readandclear_64
#define	atomic_set_long			atomic_set_64
#define	atomic_swap_long		atomic_swap_64
#define	atomic_subtract_long		atomic_subtract_64
#define	atomic_testandclear_long	atomic_testandclear_64
#define	atomic_testandset_long		atomic_testandset_64
596
/* Pointers are 64 bits. */
#define	atomic_add_ptr			atomic_add_64
#define	atomic_fcmpset_ptr		atomic_fcmpset_64
#define	atomic_clear_ptr		atomic_clear_64
#define	atomic_cmpset_ptr		atomic_cmpset_64
#define	atomic_fetchadd_ptr		atomic_fetchadd_64
#define	atomic_readandclear_ptr		atomic_readandclear_64
#define	atomic_set_ptr			atomic_set_64
#define	atomic_swap_ptr			atomic_swap_64
#define	atomic_subtract_ptr		atomic_subtract_64
/*
 * The 64-bit test-and-bit ops are generated above and aliased for
 * _long; provide the matching _ptr aliases for consistency.
 */
#define	atomic_testandclear_ptr		atomic_testandclear_64
#define	atomic_testandset_ptr		atomic_testandset_64
606
/* Acquire variants for long. */
#define	atomic_add_acq_long		atomic_add_acq_64
#define	atomic_fcmpset_acq_long		atomic_fcmpset_acq_64
#define	atomic_clear_acq_long		atomic_clear_acq_64
#define	atomic_cmpset_acq_long		atomic_cmpset_acq_64
#define	atomic_load_acq_long		atomic_load_acq_64
#define	atomic_set_acq_long		atomic_set_acq_64
#define	atomic_subtract_acq_long	atomic_subtract_acq_64
#define	atomic_testandset_acq_long	atomic_testandset_acq_64
615
/* Acquire variants for pointers. */
#define	atomic_add_acq_ptr		atomic_add_acq_64
#define	atomic_fcmpset_acq_ptr		atomic_fcmpset_acq_64
#define	atomic_clear_acq_ptr		atomic_clear_acq_64
#define	atomic_cmpset_acq_ptr		atomic_cmpset_acq_64
#define	atomic_load_acq_ptr		atomic_load_acq_64
#define	atomic_set_acq_ptr		atomic_set_acq_64
#define	atomic_subtract_acq_ptr		atomic_subtract_acq_64
/* Match atomic_testandset_acq_long; the acq 64-bit op is generated above. */
#define	atomic_testandset_acq_ptr	atomic_testandset_acq_64
623
/* Release variants for long (no rel test ops are generated). */
#define	atomic_add_rel_long		atomic_add_rel_64
#define	atomic_fcmpset_rel_long		atomic_fcmpset_rel_64
#define	atomic_clear_rel_long		atomic_clear_rel_64
#define	atomic_cmpset_rel_long		atomic_cmpset_rel_64
#define	atomic_set_rel_long		atomic_set_rel_64
#define	atomic_subtract_rel_long	atomic_subtract_rel_64
#define	atomic_store_rel_long		atomic_store_rel_64
631
/* Release variants for pointers. */
#define	atomic_add_rel_ptr		atomic_add_rel_64
#define	atomic_fcmpset_rel_ptr		atomic_fcmpset_rel_64
#define	atomic_clear_rel_ptr		atomic_clear_rel_64
#define	atomic_cmpset_rel_ptr		atomic_cmpset_rel_64
#define	atomic_set_rel_ptr		atomic_set_rel_64
#define	atomic_subtract_rel_ptr		atomic_subtract_rel_64
#define	atomic_store_rel_ptr		atomic_store_rel_64
639
/*
 * Thread fences.  An acquire fence needs only "dmb ld": it orders
 * earlier loads before later loads and stores.  A release fence must
 * order earlier loads *and* stores before later stores; "dmb st" only
 * orders stores against stores, so the full "dmb sy" is used for the
 * rel, acq_rel and seq_cst fences.
 */
static __inline void
atomic_thread_fence_acq(void)
{

	dmb(ld);
}

static __inline void
atomic_thread_fence_rel(void)
{

	dmb(sy);
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	dmb(sy);
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	dmb(sy);
}
667
#endif /* defined(SAN_NEEDS_INTERCEPTORS) && !defined(SAN_RUNTIME) */
669 #endif /* _MACHINE_ATOMIC_H_ */