/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

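/*
 * All three barriers are implemented as a locked no-op add to the top
 * of the stack: a locked read-modify-write acts as a full fence on x86
 * and is reportedly cheaper than mfence on common processors, and i386
 * offers no lighter-weight fence with the required Store/Load ordering.
 */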
#define	mb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	wmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define	rmb()	__asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
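/*
 * Usage sketch (illustrative only; "refs", "obj" and "destroy" are
 * hypothetical names, not part of this header): a reference count
 * built from these primitives might look like
 *
 *	static u_int refs = 1;
 *
 *	atomic_add_int(&refs, 1);		take a reference
 *	if (atomic_fetchadd_int(&refs, -1) == 1)
 *		destroy(obj);			dropped the last one
 */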

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_char(volatile u_char *dst, u_char expect, u_char src);
int	atomic_cmpset_short(volatile u_short *dst, u_short expect, u_short src);
int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_fcmpset_char(volatile u_char *dst, u_char *expect, u_char src);
int	atomic_fcmpset_short(volatile u_short *dst, u_short *expect,
	    u_short src);
int	atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);

#define	ATOMIC_LOAD(TYPE, LOP)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int		atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64(volatile uint64_t *);
void		atomic_store_rel_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64(volatile uint64_t *, uint64_t);
uint64_t	atomic_fetchadd_64(volatile uint64_t *, uint64_t);

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is marked volatile so that the compiler cannot discard
 * it as dead code.  The barrier variants also need the "memory"
 * clobber, since GCC otherwise reorders memory operations aggressively
 * across inline assembly.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack
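/*
 * For reference, ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) expands
 * on an SMP kernel (MPLOCKED == "lock ; ") roughly to
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock ; addl %1,%0"
 *		: "+m" (*p) : "ir" (v) : "cc");
 *	}
 *
 * plus an atomic_add_barr_int variant that additionally clobbers
 * "memory", making it a compiler barrier as well.
 */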

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * cmpset:
 *	if (*dst == expect)
 *		*dst = src
 *
 * fcmpset:
 *	if (*dst == *expect)
 *		*dst = src
 *	else
 *		*expect = *dst
 *
 * Returns 0 on failure, non-zero on success.
 */
#define	ATOMIC_CMPSET(TYPE, CONS)			\
static __inline int					\
atomic_cmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg	%3,%1 ;		"		\
	"	sete	%0 ;		"		\
	"# atomic_cmpset_" #TYPE "	"		\
	: "=q" (res),			/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (expect)			/* 2 */		\
	: CONS (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}							\
							\
static __inline int					\
atomic_fcmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE *expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg	%3,%1 ;		"		\
	"	sete	%0 ;		"		\
	"# atomic_fcmpset_" #TYPE "	"		\
	: "=q" (res),			/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (*expect)		/* 2 */		\
	: CONS (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}

ATOMIC_CMPSET(char, "q");
ATOMIC_CMPSET(short, "r");
ATOMIC_CMPSET(int, "r");
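/*
 * Usage sketch (illustrative only; "counter" is a hypothetical
 * variable): the usual lock-free update loop.  fcmpset writes the
 * observed value back into *expect on failure, so the loop does not
 * need to re-read the target explicitly as it would with cmpset:
 *
 *	volatile u_int counter;
 *	u_int old, new;
 *
 *	old = counter;
 *	do {
 *		new = old + old;
 *	} while (atomic_fcmpset_int(&counter, &old, new) == 0);
 */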

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}
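/*
 * Usage sketch (illustrative only; "next_ticket" and "now_serving" are
 * hypothetical): fetchadd hands out a distinct value to every caller,
 * which is exactly what a ticket lock needs:
 *
 *	u_int me;
 *
 *	me = atomic_fetchadd_int(&next_ticket, 1);
 *	while (atomic_load_acq_int(&now_serving) != me)
 *		;				spin until served
 */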

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandclear_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}
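/*
 * Usage sketch (illustrative only; "lockw" is hypothetical): the
 * test-and-set primitives map directly onto a one-bit try-lock.  Note
 * that testandset takes a bit index while clear takes a mask, so bit 0
 * pairs with mask 1:
 *
 *	if (atomic_testandset_int(&lockw, 0) == 0) {
 *		... critical section ...
 *		atomic_clear_rel_int(&lockw, 1);
 *	}
 */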

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may be reordered ahead of an earlier store, so for
 * atomic_load_acq we have to ensure a Store/Load barrier around the
 * load in SMP kernels.  We use "lock cmpxchg" as recommended by the
 * AMD Software Optimization Guide, rather than mfence.  For UP
 * kernels, the cache of the single processor is always consistent, so
 * only the compiler has to be kept in line.
 */
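/*
 * Usage sketch (illustrative only; "ready", "msg", "compute" and
 * "consume" are hypothetical): the release store keeps the payload
 * write from sinking below the flag, and the acquire load keeps the
 * payload read from rising above it:
 *
 *	producer:	msg = compute();
 *			atomic_store_rel_int(&ready, 1);
 *
 *	consumer:	while (atomic_load_acq_int(&ready) == 0)
 *				;
 *			consume(msg);
 */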
#define	ATOMIC_STORE(TYPE)				\
static __inline void					\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__compiler_membar();				\
	*p = v;						\
}							\
struct __hack

#if defined(_KERNEL) && !defined(SMP)

#define	ATOMIC_LOAD(TYPE, LOP)				\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE tmp;					\
							\
	tmp = *p;					\
	__compiler_membar();				\
	return (tmp);					\
}							\
struct __hack

#else /* !(_KERNEL && !SMP) */

#define	ATOMIC_LOAD(TYPE, LOP)				\
static __inline u_##TYPE				\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
{							\
	u_##TYPE res;					\
							\
	__asm __volatile(MPLOCKED LOP			\
	: "=a" (res),			/* 0 */		\
	  "+m" (*p)			/* 1 */		\
	: : "memory", "cc");				\
	return (res);					\
}							\
struct __hack

#endif /* _KERNEL && !SMP */

#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int		atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int		atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t	atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t	atomic_load_acq_64_i586(volatile uint64_t *);
void		atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void		atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t	atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif

/*
 * The I486 does not support SMP or CMPXCHG8B, so the i386 variants
 * below emulate 64-bit atomics by disabling interrupts around plain
 * 32-bit moves.
 */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	volatile uint32_t *p;
	u_char res;

	p = (volatile uint32_t *)dst;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	xorl	%1,%%eax ;	"
	"	xorl	%2,%%edx ;	"
	"	orl	%%edx,%%eax ;	"
	"	jne	1f ;		"
	"	movl	%4,%1 ;		"
	"	movl	%5,%2 ;		"
	"1:				"
	"	sete	%3 ;		"
	"	popfl"
	: "+A" (expect),		/* 0 */
	  "+m" (*p),			/* 1 */
	  "+m" (*(p + 1)),		/* 2 */
	  "=q" (res)			/* 3 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	popfl"
	: "=&A" (res)			/* 0 */
	: "m" (*q),			/* 1 */
	  "m" (*(q + 1))		/* 2 */
	: "memory");
	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%%eax,%0 ;	"
	"	movl	%%edx,%1 ;	"
	"	popfl"
	: "=m" (*q),			/* 0 */
	  "=m" (*(q + 1))		/* 1 */
	: "A" (v)			/* 2 */
	: "memory");
}

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	movl	%1,%%eax ;	"
	"	movl	%2,%%edx ;	"
	"	movl	%4,%2 ;		"
	"	movl	%3,%1 ;		"
	"	popfl"
	: "=&A" (res),			/* 0 */
	  "+m" (*q),			/* 1 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */
	return (res);
}

static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchg8b %1 ;		"
	"	sete	%0"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+A" (expect)			/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	"	movl	%%ebx,%%eax ;	"
	"	movl	%%ecx,%%edx ;	"
	"	" MPLOCKED "		"
	"	cmpxchg8b %1"
	: "=&A" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "memory", "cc");
	return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
}

static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	"	movl	%%eax,%%ebx ;	"
	"	movl	%%edx,%%ecx ;	"
	"1:				"
	"	" MPLOCKED "		"
	"	cmpxchg8b %0 ;		"
	"	jne	1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
	return (v);
}

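/*
 * The wrappers below pick an implementation at run time based on the
 * CPUID_CX8 feature bit, so one kernel binary runs both on processors
 * with CMPXCHG8B and on older ones without it.
 */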
static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_cmpset_64_i386(dst, expect, src));
	else
		return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_load_acq_64_i386(p));
	else
		return (atomic_load_acq_64_i586(p));
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		atomic_store_rel_64_i386(p, v);
	else
		atomic_store_rel_64_i586(p, v);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}

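/*
 * There is no 64-bit xadd on i386, so fetchadd is emulated with a
 * cmpset loop; atomic_cmpset_64 above already dispatches on CPUID_CX8.
 */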
static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t v)
{

	for (;;) {
		uint64_t t = *p;
		if (atomic_cmpset_64(p, t, t + v))
			return (t);
	}
}

#endif /* _KERNEL */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_LOAD(char,  "cmpxchgb %b0,%1");
ATOMIC_LOAD(short, "cmpxchgw %w0,%1");
ATOMIC_LOAD(int,   "cmpxchgl %0,%1");
ATOMIC_LOAD(long,  "cmpxchgl %0,%1");

ATOMIC_STORE(char);
ATOMIC_STORE(short);
ATOMIC_STORE(int);
ATOMIC_STORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE

#ifndef WANT_FUNCTIONS

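/*
 * On i386, u_long and u_int are both 32 bits wide, so the long
 * variants below can safely delegate to the int implementations.
 */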
static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{

	return (atomic_testandclear_int((volatile u_int *)p, v));
}

/*
 * Read the current value and store a new value in the destination.  The
 * xchg instruction locks the bus implicitly when given a memory
 * operand, so no explicit lock prefix is needed here.
 */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char
#define	atomic_cmpset_acq_char		atomic_cmpset_char
#define	atomic_cmpset_rel_char		atomic_cmpset_char
#define	atomic_fcmpset_acq_char		atomic_fcmpset_char
#define	atomic_fcmpset_rel_char		atomic_fcmpset_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short
#define	atomic_cmpset_acq_short		atomic_cmpset_short
#define	atomic_cmpset_rel_short		atomic_cmpset_short
#define	atomic_fcmpset_acq_short	atomic_fcmpset_short
#define	atomic_fcmpset_rel_short	atomic_fcmpset_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int
#define	atomic_fcmpset_acq_int		atomic_fcmpset_int
#define	atomic_fcmpset_rel_int		atomic_fcmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
#define	atomic_fcmpset_acq_long		atomic_fcmpset_long
#define	atomic_fcmpset_rel_long		atomic_fcmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char
#define	atomic_cmpset_8		atomic_cmpset_char
#define	atomic_cmpset_acq_8	atomic_cmpset_acq_char
#define	atomic_cmpset_rel_8	atomic_cmpset_rel_char
#define	atomic_fcmpset_8	atomic_fcmpset_char
#define	atomic_fcmpset_acq_8	atomic_fcmpset_acq_char
#define	atomic_fcmpset_rel_8	atomic_fcmpset_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short
#define	atomic_cmpset_16	atomic_cmpset_short
#define	atomic_cmpset_acq_16	atomic_cmpset_acq_short
#define	atomic_cmpset_rel_16	atomic_cmpset_rel_short
#define	atomic_fcmpset_16	atomic_fcmpset_short
#define	atomic_fcmpset_acq_16	atomic_fcmpset_acq_short
#define	atomic_fcmpset_rel_16	atomic_fcmpset_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_fcmpset_32	atomic_fcmpset_int
#define	atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define	atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on pointers. */
#define	atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define	atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define	atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define	atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define	atomic_fcmpset_ptr(dst, old, new) \
	atomic_fcmpset_int((volatile u_int *)(dst), (u_int *)(old), (u_int)(new))
#define	atomic_fcmpset_acq_ptr(dst, old, new) \
	atomic_fcmpset_acq_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define	atomic_fcmpset_rel_ptr(dst, old, new) \
	atomic_fcmpset_rel_int((volatile u_int *)(dst), (u_int *)(old), \
	    (u_int)(new))
#define	atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */