/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/11.1/sys/amd64/include/atomic.h 315371 2017-03-16 06:00:27Z mjg $
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/*
 * To express interprocessor (as opposed to processor and device) memory
 * ordering constraints, use the atomic_*() functions with acquire and release
 * semantics rather than the *mb() functions.  An architecture's memory
 * ordering (or memory consistency) model governs the order in which a
 * program's accesses to different locations may be performed by an
 * implementation of that architecture.  In general, for memory regions
 * defined as writeback cacheable, the memory ordering implemented by amd64
 * processors preserves the program ordering of a load followed by a load, a
 * load followed by a store, and a store followed by a store.  Only a store
 * followed by a load to a different memory location may be reordered.
 * Therefore, except for special cases, like non-temporal memory accesses or
 * memory regions defined as write combining, the memory ordering effects
 * provided by the sfence instruction in the wmb() function and the lfence
 * instruction in the rmb() function are redundant.  In contrast, the
 * atomic_*() functions with acquire and release semantics do not perform
 * redundant instructions for ordinary cases of interprocessor memory
 * ordering on any architecture.
 */
#define	mb()	__asm __volatile("mfence;" : : : "memory")
#define	wmb()	__asm __volatile("sfence;" : : : "memory")
#define	rmb()	__asm __volatile("lfence;" : : : "memory")

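/*
 * Illustrative sketch (not part of this interface): to publish data for
 * another CPU, a release store is preferred over wmb().  The variable
 * names below are hypothetical.
 *
 *	producer:
 *		msg = compute_value();
 *		atomic_store_rel_int(&msg_ready, 1);
 *
 *	consumer:
 *		while (atomic_load_acq_int(&msg_ready) == 0)
 *			cpu_spinwait();
 *		use(msg);		// ordered after the acquire load
 */
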
/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

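/*
 * Usage sketch (illustrative only; the variable names are hypothetical):
 * the plain variants above update memory atomically but do not order the
 * surrounding loads and stores.
 *
 *	atomic_set_int(&dev_flags, 0x01);	// atomically OR in a flag bit
 *	atomic_clear_int(&dev_flags, 0x01);	// atomically clear the same bit
 *	atomic_add_int(&stat_count, 1);		// unordered atomic increment
 */
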
/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V) \
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v); \
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src);
int	atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
int	atomic_fcmpset_long(volatile u_long *dst, u_long *expect, u_long src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long	atomic_fetchadd_long(volatile u_long *p, u_long v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandset_long(volatile u_long *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);
int	atomic_testandclear_long(volatile u_long *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE) \
u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE) \
void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The asm statements are marked volatile to keep the compiler from removing
 * them as dead code.  GCC also reorders memory operations aggressively, so
 * the barrier variants must clobber "memory" to prevent loads and stores
 * from being moved across them.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V) \
static __inline void \
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v) \
{ \
	__asm __volatile(MPLOCKED OP \
	: "+m" (*p) \
	: CONS (V) \
	: "cc"); \
} \
 \
static __inline void \
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v) \
{ \
	__asm __volatile(MPLOCKED OP \
	: "+m" (*p) \
	: CONS (V) \
	: "memory", "cc"); \
} \
struct __hack

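/*
 * For reference, ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) expands to
 * roughly the following on an SMP kernel (the _barr_ variant differs only
 * by the additional "memory" clobber):
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock ; addl %1,%0"
 *		    : "+m" (*p) : "ir" (v) : "cc");
 *	}
 */
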
/*
 * Atomic compare and set, used by the mutex functions.
 *
 *	if (*dst == expect)
 *		*dst = src
 *
 * performed as a single atomic operation (32-bit and 64-bit variants below).
 *
 * Returns 0 on failure, non-zero on success.
 */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	" " MPLOCKED " "
	" cmpxchgl %3,%1 ; "
	" sete %0 ; "
	"# atomic_cmpset_int"
	: "=q" (res),		/* 0 */
	  "+m" (*dst),		/* 1 */
	  "+a" (expect)		/* 2 */
	: "r" (src)		/* 3 */
	: "memory", "cc");
	return (res);
}

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{
	u_char res;

	__asm __volatile(
	" " MPLOCKED " "
	" cmpxchgq %3,%1 ; "
	" sete %0 ; "
	"# atomic_cmpset_long"
	: "=q" (res),		/* 0 */
	  "+m" (*dst),		/* 1 */
	  "+a" (expect)		/* 2 */
	: "r" (src)		/* 3 */
	: "memory", "cc");
	return (res);
}

static __inline int
atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src)
{
	u_char res;

	__asm __volatile(
	" " MPLOCKED " "
	" cmpxchgl %3,%1 ; "
	" sete %0 ; "
	"# atomic_fcmpset_int"
	: "=r" (res),		/* 0 */
	  "+m" (*dst),		/* 1 */
	  "+a" (*expect)	/* 2 */
	: "r" (src)		/* 3 */
	: "memory", "cc");
	return (res);
}

static __inline int
atomic_fcmpset_long(volatile u_long *dst, u_long *expect, u_long src)
{
	u_char res;

	__asm __volatile(
	" " MPLOCKED " "
	" cmpxchgq %3,%1 ; "
	" sete %0 ; "
	"# atomic_fcmpset_long"
	: "=r" (res),		/* 0 */
	  "+m" (*dst),		/* 1 */
	  "+a" (*expect)	/* 2 */
	: "r" (src)		/* 3 */
	: "memory", "cc");
	return (res);
}

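/*
 * Typical use (illustrative sketch; the helper below is hypothetical): a
 * lock-free read-modify-write loop.  On failure atomic_fcmpset_int()
 * reloads *expect from memory, so the loop does not need to re-read *p
 * explicitly.
 *
 *	static __inline void
 *	example_saturating_inc(volatile u_int *p, u_int limit)
 *	{
 *		u_int old;
 *
 *		old = *p;
 *		do {
 *			if (old >= limit)
 *				return;
 *		} while (atomic_fcmpset_int(p, &old, old + 1) == 0);
 *	}
 */
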
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	" " MPLOCKED " "
	" xaddl %0,%1 ; "
	"# atomic_fetchadd_int"
	: "+r" (v),		/* 0 */
	  "+m" (*p)		/* 1 */
	: : "cc");
	return (v);
}

/*
 * Atomically add the value of v to the long integer pointed to by p and
 * return the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	" " MPLOCKED " "
	" xaddq %0,%1 ; "
	"# atomic_fetchadd_long"
	: "+r" (v),		/* 0 */
	  "+m" (*p)		/* 1 */
	: : "cc");
	return (v);
}

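/*
 * Usage sketch (illustrative; "next_id" is hypothetical): because the
 * return value is the pre-addition contents of *p, fetchadd is convenient
 * for handing out unique tickets or array slots.
 *
 *	u_int id;
 *
 *	id = atomic_fetchadd_int(&next_id, 1);	// id holds the old value
 */
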
static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	" " MPLOCKED " "
	" btsl %2,%1 ; "
	" setc %0 ; "
	"# atomic_testandset_int"
	: "=q" (res),		/* 0 */
	  "+m" (*p)		/* 1 */
	: "Ir" (v & 0x1f)	/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	" " MPLOCKED " "
	" btsq %2,%1 ; "
	" setc %0 ; "
	"# atomic_testandset_long"
	: "=q" (res),		/* 0 */
	  "+m" (*p)		/* 1 */
	: "Jr" ((u_long)(v & 0x3f)) /* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	" " MPLOCKED " "
	" btrl %2,%1 ; "
	" setc %0 ; "
	"# atomic_testandclear_int"
	: "=q" (res),		/* 0 */
	  "+m" (*p)		/* 1 */
	: "Ir" (v & 0x1f)	/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	" " MPLOCKED " "
	" btrq %2,%1 ; "
	" setc %0 ; "
	"# atomic_testandclear_long"
	: "=q" (res),		/* 0 */
	  "+m" (*p)		/* 1 */
	: "Jr" ((u_long)(v & 0x3f)) /* 2 */
	: "cc");
	return (res);
}

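/*
 * Usage sketch (illustrative; "slot_map" and "idx" are hypothetical): the
 * test-and-set/clear operations act on one bit and return its previous
 * value, which allows them to arbitrate ownership of a slot.  Note that
 * only the low 5 (int) or 6 (long) bits of the bit index are used.
 *
 *	if (atomic_testandset_long(&slot_map, idx) == 0) {
 *		// the bit was clear; this CPU now owns slot "idx"
 *		...
 *		atomic_testandclear_long(&slot_map, idx);
 *	}
 */
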
/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 (and amd64) memory model, a simple store guarantees release
 * semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  To avoid false data dependencies, we use a
 * special address for "mem".  In the kernel, we use a private per-cpu
 * cache line.  In user space, we use a word in the stack's red zone
 * (-8(%rsp)).
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */

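/*
 * Example of the store/load reordering described above (the flag names are
 * hypothetical).  Without the sequentially consistent fences, each CPU's
 * load may be satisfied before the other CPU's store becomes visible, so
 * both may read 0 and enter the critical region at the same time.
 *
 *	// CPU 0				// CPU 1
 *	flag0 = 1;				flag1 = 1;
 *	atomic_thread_fence_seq_cst();		atomic_thread_fence_seq_cst();
 *	if (flag1 == 0)				if (flag0 == 0)
 *		enter_critical_region();		enter_critical_region();
 */
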
#if defined(_KERNEL)

/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in amd64/vm_machdep.c ensures that the value is correct.
 */
#define	OFFSETOF_MONITORBUF	0x180

#if defined(SMP)
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,%%gs:%0"
	    : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
}
#else /* _KERNEL && UP */
static __inline void
__storeload_barrier(void)
{

	__compiler_membar();
}
#endif /* SMP */
#else /* !_KERNEL */
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,-8(%%rsp)" : : : "memory", "cc");
}
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE) \
static __inline u_##TYPE \
atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
{ \
	u_##TYPE res; \
 \
	res = *p; \
	__compiler_membar(); \
	return (res); \
} \
struct __hack

#define	ATOMIC_STORE(TYPE) \
static __inline void \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v) \
{ \
 \
	__compiler_membar(); \
	*p = v; \
} \
struct __hack

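/*
 * For reference, ATOMIC_LOAD(int) expands to roughly the following
 * (ignoring the trailing struct __hack): on amd64 an acquire load is an
 * ordinary load followed by a compiler barrier; no fence instruction is
 * needed.
 *
 *	static __inline u_int
 *	atomic_load_acq_int(volatile u_int *p)
 *	{
 *		u_int res;
 *
 *		res = *p;
 *		__compiler_membar();
 *		return (res);
 *	}
 */
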
static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set, char, "orb %b1,%0", "iq", v);
ATOMIC_ASM(clear, char, "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add, char, "addb %b1,%0", "iq", v);
ATOMIC_ASM(subtract, char, "subb %b1,%0", "iq", v);

ATOMIC_ASM(set, short, "orw %w1,%0", "ir", v);
ATOMIC_ASM(clear, short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add, short, "addw %w1,%0", "ir", v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir", v);

ATOMIC_ASM(set, int, "orl %1,%0", "ir", v);
ATOMIC_ASM(clear, int, "andl %1,%0", "ir", ~v);
ATOMIC_ASM(add, int, "addl %1,%0", "ir", v);
ATOMIC_ASM(subtract, int, "subl %1,%0", "ir", v);

ATOMIC_ASM(set, long, "orq %1,%0", "ir", v);
ATOMIC_ASM(clear, long, "andq %1,%0", "ir", ~v);
ATOMIC_ASM(add, long, "addq %1,%0", "ir", v);
ATOMIC_ASM(subtract, long, "subq %1,%0", "ir", v);

#define	ATOMIC_LOADSTORE(TYPE) \
	ATOMIC_LOAD(TYPE); \
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE
#ifndef WANT_FUNCTIONS

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	" xchgl %1,%0 ; "
	"# atomic_swap_int"
	: "+r" (v),		/* 0 */
	  "+m" (*p));		/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	" xchgq %1,%0 ; "
	"# atomic_swap_long"
	: "+r" (v),		/* 0 */
	  "+m" (*p));		/* 1 */
	return (v);
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int
#define	atomic_fcmpset_acq_int		atomic_fcmpset_int
#define	atomic_fcmpset_rel_int		atomic_fcmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
#define	atomic_fcmpset_acq_long		atomic_fcmpset_long
#define	atomic_fcmpset_rel_long		atomic_fcmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)

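/*
 * Usage sketch (illustrative; "pending" and handle_event() are
 * hypothetical): readandclear is implemented with atomic_swap_*() and is
 * handy for draining a word of pending-event bits in a single atomic step.
 *
 *	u_int ev;
 *
 *	ev = atomic_readandclear_int(&pending);
 *	while (ev != 0) {
 *		handle_event(ffs(ev) - 1);
 *		ev &= ev - 1;		// clear the lowest set bit
 *	}
 */
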
/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_fcmpset_32	atomic_fcmpset_int
#define	atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define	atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on 64-bit quad words. */
#define	atomic_set_64		atomic_set_long
#define	atomic_set_acq_64	atomic_set_acq_long
#define	atomic_set_rel_64	atomic_set_rel_long
#define	atomic_clear_64		atomic_clear_long
#define	atomic_clear_acq_64	atomic_clear_acq_long
#define	atomic_clear_rel_64	atomic_clear_rel_long
#define	atomic_add_64		atomic_add_long
#define	atomic_add_acq_64	atomic_add_acq_long
#define	atomic_add_rel_64	atomic_add_rel_long
#define	atomic_subtract_64	atomic_subtract_long
#define	atomic_subtract_acq_64	atomic_subtract_acq_long
#define	atomic_subtract_rel_64	atomic_subtract_rel_long
#define	atomic_load_acq_64	atomic_load_acq_long
#define	atomic_store_rel_64	atomic_store_rel_long
#define	atomic_cmpset_64	atomic_cmpset_long
#define	atomic_cmpset_acq_64	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_64	atomic_cmpset_rel_long
#define	atomic_fcmpset_64	atomic_fcmpset_long
#define	atomic_fcmpset_acq_64	atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_64	atomic_fcmpset_rel_long
#define	atomic_swap_64		atomic_swap_long
#define	atomic_readandclear_64	atomic_readandclear_long
#define	atomic_fetchadd_64	atomic_fetchadd_long
#define	atomic_testandset_64	atomic_testandset_long
#define	atomic_testandclear_64	atomic_testandclear_long

/* Operations on pointers. */
#define	atomic_set_ptr		atomic_set_long
#define	atomic_set_acq_ptr	atomic_set_acq_long
#define	atomic_set_rel_ptr	atomic_set_rel_long
#define	atomic_clear_ptr	atomic_clear_long
#define	atomic_clear_acq_ptr	atomic_clear_acq_long
#define	atomic_clear_rel_ptr	atomic_clear_rel_long
#define	atomic_add_ptr		atomic_add_long
#define	atomic_add_acq_ptr	atomic_add_acq_long
#define	atomic_add_rel_ptr	atomic_add_rel_long
#define	atomic_subtract_ptr	atomic_subtract_long
#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define	atomic_load_acq_ptr	atomic_load_acq_long
#define	atomic_store_rel_ptr	atomic_store_rel_long
#define	atomic_cmpset_ptr	atomic_cmpset_long
#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define	atomic_fcmpset_ptr	atomic_fcmpset_long
#define	atomic_fcmpset_acq_ptr	atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_ptr	atomic_fcmpset_rel_long
#define	atomic_swap_ptr		atomic_swap_long
#define	atomic_readandclear_ptr	atomic_readandclear_long

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */