/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

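/*
 * Full (mb), write (wmb) and read (rmb) memory barriers.  A locked
 * read-modify-write of a word on the stack is a full barrier on every
 * IA-32 processor; unlike mfence/lfence/sfence it does not require SSE2,
 * which is presumably why the same sequence is used for all three
 * barriers here.
 */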
#define mb() __asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define wmb() __asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define rmb() __asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
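
/*
 * Illustrative example (not part of this interface): manipulating flag
 * bits in a caller-supplied word.  The plain variants imply no acquire or
 * release ordering; they only guarantee that each read-modify-write is
 * performed indivisibly.
 *
 *	static volatile u_int flags;
 *
 *	atomic_set_int(&flags, 0x01);	set bit 0
 *	atomic_clear_int(&flags, 0x01);	clear bit 0
 */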

/*
 * The above functions are expanded inline in the statically-linked
 * kernel. Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V) \
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v); \
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int atomic_fetchadd_int(volatile u_int *p, u_int v);
int atomic_testandset_int(volatile u_int *p, u_int v);

#define ATOMIC_LOAD(TYPE, LOP) \
u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define ATOMIC_STORE(TYPE) \
void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t atomic_swap_64(volatile uint64_t *, uint64_t);
uint64_t atomic_fetchadd_64(volatile uint64_t *, uint64_t);

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED "lock ; "
#else
#define MPLOCKED
#endif

/*
 * The assembly is marked volatile so that the compiler cannot optimize
 * the operations away.  GCC aggressively reorders memory accesses, so the
 * barrier variants also clobber "memory" to keep the compiler from moving
 * other accesses across them.
 */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V) \
static __inline void \
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{ \
	__asm __volatile(MPLOCKED OP \
	: "=m" (*p) \
	: CONS (V), "m" (*p) \
	: "cc"); \
} \
\
static __inline void \
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{ \
	__asm __volatile(MPLOCKED OP \
	: "=m" (*p) \
	: CONS (V), "m" (*p) \
	: "memory", "cc"); \
} \
struct __hack
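
/*
 * As a concrete illustration, the instantiation
 * ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) near the end of this file
 * expands to roughly:
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile(MPLOCKED "addl %1,%0"
 *		: "=m" (*p)
 *		: "ir" (v), "m" (*p)
 *		: "cc");
 *	}
 *
 * plus an atomic_add_barr_int variant that additionally clobbers "memory"
 * so that the compiler cannot move other memory accesses across it.
 */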

#if defined(_KERNEL) && !defined(WANT_FUNCTIONS)

/* I486 does not support SMP or CMPXCHG8B. */
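/*
 * The _i386 variants below therefore provide 64-bit atomicity by briefly
 * disabling interrupts around a pair of 32-bit moves; this is sufficient
 * only because such CPUs are uniprocessor-only.
 */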
static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
	volatile uint32_t *high, *low;
	uint64_t res;

	low = (volatile uint32_t *)p;
	high = (volatile uint32_t *)p + 1;
	__asm __volatile(
	" pushfl ; "
	" cli ; "
	" movl %1,%%eax ; "
	" movl %2,%%edx ; "
	" popfl"
	: "=&A" (res)		/* 0 */
	: "m" (*low),		/* 1 */
	  "m" (*high)		/* 2 */
	: "memory");

	return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *high, *low;

	low = (volatile uint32_t *)p;
	high = (volatile uint32_t *)p + 1;
	__asm __volatile(
	" pushfl ; "
	" cli ; "
	" movl %%eax,%0 ; "
	" movl %%edx,%1 ; "
	" popfl"
	: "=m" (*low),		/* 0 */
	  "=m" (*high)		/* 1 */
	: "A" (v)		/* 2 */
	: "memory");
}

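/*
 * The _i586 variants rely on CMPXCHG8B.  In the load below, copying
 * %ebx/%ecx from %eax/%edx first makes the "new" value equal to whatever
 * comparand happens to be in %eax:%edx, so the instruction either leaves
 * memory unchanged or rewrites it with the value it already holds; either
 * way %eax:%edx ends up with an atomic snapshot of the 64-bit word.  The
 * store below simply retries the CMPXCHG8B until the exchange succeeds.
 */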
static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
	uint64_t res;

	__asm __volatile(
	" movl %%ebx,%%eax ; "
	" movl %%ecx,%%edx ; "
	" " MPLOCKED " "
	" cmpxchg8b %2"
	: "=&A" (res),		/* 0 */
	  "=m" (*p)		/* 1 */
	: "m" (*p)		/* 2 */
	: "memory", "cc");

	return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	" movl %%eax,%%ebx ; "
	" movl %%edx,%%ecx ; "
	"1: "
	" " MPLOCKED " "
	" cmpxchg8b %2 ; "
	" jne 1b"
	: "=m" (*p),		/* 0 */
	  "+A" (v)		/* 1 */
	: "m" (*p)		/* 2 */
	: "ebx", "ecx", "memory", "cc");
}

#endif /* _KERNEL && !WANT_FUNCTIONS */

/*
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == expect) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	" pushfl ; "
	" cli ; "
	" cmpl %3,%4 ; "
	" jne 1f ; "
	" movl %2,%1 ; "
	"1: "
	" sete %0 ; "
	" popfl ; "
	"# atomic_cmpset_int"
	: "=q" (res),		/* 0 */
	  "=m" (*dst)		/* 1 */
	: "r" (src),		/* 2 */
	  "r" (expect),		/* 3 */
	  "m" (*dst)		/* 4 */
	: "memory");

	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	" " MPLOCKED " "
	" cmpxchgl %2,%1 ; "
	" sete %0 ; "
	"1: "
	"# atomic_cmpset_int"
	: "=a" (res),		/* 0 */
	  "=m" (*dst)		/* 1 */
	: "r" (src),		/* 2 */
	  "a" (expect),		/* 3 */
	  "m" (*dst)		/* 4 */
	: "memory", "cc");

	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */
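
/*
 * Example (illustrative only, not part of this header): a typical
 * compare-and-set retry loop.  For a caller-supplied word "p" and mask
 * "bits", this atomically ORs the mask into the word, equivalent in
 * effect to atomic_set_int():
 *
 *	u_int old;
 *
 *	do {
 *		old = *p;
 *	} while (atomic_cmpset_int(p, old, old | bits) == 0);
 */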

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	" " MPLOCKED " "
	" xaddl %0, %1 ; "
	"# atomic_fetchadd_int"
	: "+r" (v),		/* 0 (result) */
	  "=m" (*p)		/* 1 */
	: "m" (*p)		/* 2 */
	: "cc");
	return (v);
}
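
/*
 * Example (illustrative only): because atomic_fetchadd_int() returns the
 * value *p held before the addition, it can hand out unique, increasing
 * ticket numbers from a caller-supplied counter:
 *
 *	static volatile u_int next_ticket;
 *	u_int my_ticket;
 *
 *	my_ticket = atomic_fetchadd_int(&next_ticket, 1);
 */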

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	" " MPLOCKED " "
	" btsl %2,%1 ; "
	" setc %0 ; "
	"# atomic_testandset_int"
	: "=q" (res),		/* 0 */
	  "+m" (*p)		/* 1 */
	: "Ir" (v & 0x1f)	/* 2 */
	: "cc");
	return (res);
}
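
/*
 * Example (illustrative only): atomic_testandset_int() sets bit (v % 32)
 * of *p and returns that bit's previous value, so a caller can claim a
 * flag bit exactly once.  As written above it implies no acquire or
 * release ordering beyond the atomicity of the bit operation itself.
 *
 *	if (atomic_testandset_int(&busy_flags, 3) == 0) {
 *		(bit 3 was clear and is now set; this caller owns it)
 *	}
 */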

/*
 * We assume that a = b will do atomic loads and stores. Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, loads may pass stores, so for atomic_load_acq we have to
 * ensure a Store/Load barrier to do the load in SMP kernels. We use
 * "lock cmpxchg" as recommended by the AMD Software Optimization
 * Guide, and not mfence. For UP kernels, however, the cache of the
 * single processor is always consistent, so we only need to take care
 * of the compiler.
 */
#define ATOMIC_STORE(TYPE) \
static __inline void \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{ \
	__compiler_membar(); \
	*p = v; \
} \
struct __hack

#if defined(_KERNEL) && !defined(SMP)

#define ATOMIC_LOAD(TYPE, LOP) \
static __inline u_##TYPE \
atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
{ \
	u_##TYPE tmp; \
\
	tmp = *p; \
	__compiler_membar(); \
	return (tmp); \
} \
struct __hack

#else /* !(_KERNEL && !SMP) */

#define ATOMIC_LOAD(TYPE, LOP) \
static __inline u_##TYPE \
atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
{ \
	u_##TYPE res; \
\
	__asm __volatile(MPLOCKED LOP \
	: "=a" (res),		/* 0 */ \
	  "=m" (*p)		/* 1 */ \
	: "m" (*p)		/* 2 */ \
	: "memory", "cc"); \
\
	return (res); \
} \
struct __hack

#endif /* _KERNEL && !SMP */
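
/*
 * Example (illustrative only): a producer/consumer hand-off built from the
 * acquire/release pairs generated by these macros, using caller-supplied
 * "data" and "ready" words.  The release store orders the write of "data"
 * before the write of "ready"; the acquire load orders the read of "ready"
 * before the later read of "data".
 *
 *	Producer:
 *		data = compute();
 *		atomic_store_rel_int(&ready, 1);
 *
 *	Consumer:
 *		while (atomic_load_acq_int(&ready) == 0)
 *			continue;
 *		consume(data);
 *
 * compute() and consume() are stand-ins for caller code.
 */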

#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif

/* I486 does not support SMP or CMPXCHG8B. */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	volatile uint32_t *p;
	u_char res;

	p = (volatile uint32_t *)dst;
	__asm __volatile(
	" pushfl ; "
	" cli ; "
	" xorl %1,%%eax ; "
	" xorl %2,%%edx ; "
	" orl %%edx,%%eax ; "
	" jne 1f ; "
	" movl %4,%1 ; "
	" movl %5,%2 ; "
	"1: "
	" sete %3 ; "
	" popfl"
	: "+A" (expect),		/* 0 */
	  "+m" (*p),			/* 1 */
	  "+m" (*(p + 1)),		/* 2 */
	  "=q" (res)			/* 3 */
	: "r" ((uint32_t)src),		/* 4 */
	  "r" ((uint32_t)(src >> 32))	/* 5 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
	volatile uint32_t *q;
	uint64_t res;

	q = (volatile uint32_t *)p;
	__asm __volatile(
	" pushfl ; "
	" cli ; "
	" movl %1,%%eax ; "
	" movl %2,%%edx ; "
	" movl %4,%2 ; "
	" movl %3,%1 ; "
	" popfl"
	: "=&A" (res),			/* 0 */
	  "+m" (*q),			/* 1 */
	  "+m" (*(q + 1))		/* 2 */
	: "r" ((uint32_t)v),		/* 3 */
	  "r" ((uint32_t)(v >> 32)));	/* 4 */
	return (res);
}

static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
	u_char res;

	__asm __volatile(
	" " MPLOCKED " "
	" cmpxchg8b %1 ; "
	" sete %0"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+A" (expect)			/* 2 */
	: "b" ((uint32_t)src),		/* 3 */
	  "c" ((uint32_t)(src >> 32))	/* 4 */
	: "memory", "cc");
	return (res);
}

static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

	__asm __volatile(
	" movl %%eax,%%ebx ; "
	" movl %%edx,%%ecx ; "
	"1: "
	" " MPLOCKED " "
	" cmpxchg8b %0 ; "
	" jne 1b"
	: "+m" (*p),			/* 0 */
	  "+A" (v)			/* 1 */
	: : "ebx", "ecx", "memory", "cc");
	return (v);
}

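/*
 * The wrappers below select an implementation at run time: the
 * interrupt-disabling _i386 variants are used only when the CPU does not
 * advertise CMPXCHG8B (CPUID_CX8) in cpu_feature; otherwise the
 * lock cmpxchg8b based _i586 variants are used.
 */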
static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_cmpset_64_i386(dst, expect, src));
	else
		return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

	if ((cpu_feature & CPUID_CX8) == 0)
		return (atomic_swap_64_i386(p, v));
	else
		return (atomic_swap_64_i586(p, v));
}

static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t v)
{

	for (;;) {
		uint64_t t = *p;
		if (atomic_cmpset_64(p, t, t + v))
			return (t);
	}
}

#endif /* _KERNEL */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set, char, "orb %b1,%0", "iq", v);
ATOMIC_ASM(clear, char, "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add, char, "addb %b1,%0", "iq", v);
ATOMIC_ASM(subtract, char, "subb %b1,%0", "iq", v);

ATOMIC_ASM(set, short, "orw %w1,%0", "ir", v);
ATOMIC_ASM(clear, short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add, short, "addw %w1,%0", "ir", v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir", v);

ATOMIC_ASM(set, int, "orl %1,%0", "ir", v);
ATOMIC_ASM(clear, int, "andl %1,%0", "ir", ~v);
ATOMIC_ASM(add, int, "addl %1,%0", "ir", v);
ATOMIC_ASM(subtract, int, "subl %1,%0", "ir", v);

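/*
 * On i386, u_long is 32 bits wide, so the long variants reuse the same
 * 32-bit instructions as the int variants.
 */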
ATOMIC_ASM(set, long, "orl %1,%0", "ir", v);
ATOMIC_ASM(clear, long, "andl %1,%0", "ir", ~v);
ATOMIC_ASM(add, long, "addl %1,%0", "ir", v);
ATOMIC_ASM(subtract, long, "subl %1,%0", "ir", v);

ATOMIC_LOAD(char, "cmpxchgb %b0,%1");
ATOMIC_LOAD(short, "cmpxchgw %w0,%1");
ATOMIC_LOAD(int, "cmpxchgl %0,%1");
ATOMIC_LOAD(long, "cmpxchgl %0,%1");

ATOMIC_STORE(char);
ATOMIC_STORE(short);
ATOMIC_STORE(int);
ATOMIC_STORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE

#ifndef WANT_FUNCTIONS

#ifdef _KERNEL
extern uint64_t (*atomic_load_acq_64)(volatile uint64_t *);
extern void (*atomic_store_rel_64)(volatile uint64_t *, uint64_t);
#endif

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

	return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
	    (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

	return (atomic_testandset_int((volatile u_int *)p, v));
}

/* Read the current value and store a zero in the destination. */
#ifdef __GNUCLIKE_ASM
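
/*
 * The xchgl instruction asserts the bus lock implicitly when one operand
 * is in memory, so no explicit MPLOCKED prefix is needed below.
 */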

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
	u_int res;

	res = 0;
	__asm __volatile(
	" xchgl %1,%0 ; "
	"# atomic_readandclear_int"
	: "+r" (res),		/* 0 */
	  "=m" (*addr)		/* 1 */
	: "m" (*addr));

	return (res);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
	u_long res;

	res = 0;
	__asm __volatile(
	" xchgl %1,%0 ; "
	"# atomic_readandclear_long"
	: "+r" (res),		/* 0 */
	  "=m" (*addr)		/* 1 */
	: "m" (*addr));

	return (res);
}

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	" xchgl %1,%0 ; "
	"# atomic_swap_int"
	: "+r" (v),		/* 0 */
	  "+m" (*p));		/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int atomic_readandclear_int(volatile u_int *addr);
u_long atomic_readandclear_long(volatile u_long *addr);
u_int atomic_swap_int(volatile u_int *p, u_int v);
u_long atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

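/*
 * The acquire and release variants of the read-modify-write operations map
 * to the _barr forms: on x86 a locked instruction is already a full memory
 * barrier, so the _barr variants differ from the plain ones only in also
 * acting as compiler barriers (the "memory" clobber above).
 */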
#define atomic_set_acq_char atomic_set_barr_char
#define atomic_set_rel_char atomic_set_barr_char
#define atomic_clear_acq_char atomic_clear_barr_char
#define atomic_clear_rel_char atomic_clear_barr_char
#define atomic_add_acq_char atomic_add_barr_char
#define atomic_add_rel_char atomic_add_barr_char
#define atomic_subtract_acq_char atomic_subtract_barr_char
#define atomic_subtract_rel_char atomic_subtract_barr_char

#define atomic_set_acq_short atomic_set_barr_short
#define atomic_set_rel_short atomic_set_barr_short
#define atomic_clear_acq_short atomic_clear_barr_short
#define atomic_clear_rel_short atomic_clear_barr_short
#define atomic_add_acq_short atomic_add_barr_short
#define atomic_add_rel_short atomic_add_barr_short
#define atomic_subtract_acq_short atomic_subtract_barr_short
#define atomic_subtract_rel_short atomic_subtract_barr_short

#define atomic_set_acq_int atomic_set_barr_int
#define atomic_set_rel_int atomic_set_barr_int
#define atomic_clear_acq_int atomic_clear_barr_int
#define atomic_clear_rel_int atomic_clear_barr_int
#define atomic_add_acq_int atomic_add_barr_int
#define atomic_add_rel_int atomic_add_barr_int
#define atomic_subtract_acq_int atomic_subtract_barr_int
#define atomic_subtract_rel_int atomic_subtract_barr_int
#define atomic_cmpset_acq_int atomic_cmpset_int
#define atomic_cmpset_rel_int atomic_cmpset_int

#define atomic_set_acq_long atomic_set_barr_long
#define atomic_set_rel_long atomic_set_barr_long
#define atomic_clear_acq_long atomic_clear_barr_long
#define atomic_clear_rel_long atomic_clear_barr_long
#define atomic_add_acq_long atomic_add_barr_long
#define atomic_add_rel_long atomic_add_barr_long
#define atomic_subtract_acq_long atomic_subtract_barr_long
#define atomic_subtract_rel_long atomic_subtract_barr_long
#define atomic_cmpset_acq_long atomic_cmpset_long
#define atomic_cmpset_rel_long atomic_cmpset_long

/* Operations on 8-bit bytes. */
#define atomic_set_8 atomic_set_char
#define atomic_set_acq_8 atomic_set_acq_char
#define atomic_set_rel_8 atomic_set_rel_char
#define atomic_clear_8 atomic_clear_char
#define atomic_clear_acq_8 atomic_clear_acq_char
#define atomic_clear_rel_8 atomic_clear_rel_char
#define atomic_add_8 atomic_add_char
#define atomic_add_acq_8 atomic_add_acq_char
#define atomic_add_rel_8 atomic_add_rel_char
#define atomic_subtract_8 atomic_subtract_char
#define atomic_subtract_acq_8 atomic_subtract_acq_char
#define atomic_subtract_rel_8 atomic_subtract_rel_char
#define atomic_load_acq_8 atomic_load_acq_char
#define atomic_store_rel_8 atomic_store_rel_char

/* Operations on 16-bit words. */
#define atomic_set_16 atomic_set_short
#define atomic_set_acq_16 atomic_set_acq_short
#define atomic_set_rel_16 atomic_set_rel_short
#define atomic_clear_16 atomic_clear_short
#define atomic_clear_acq_16 atomic_clear_acq_short
#define atomic_clear_rel_16 atomic_clear_rel_short
#define atomic_add_16 atomic_add_short
#define atomic_add_acq_16 atomic_add_acq_short
#define atomic_add_rel_16 atomic_add_rel_short
#define atomic_subtract_16 atomic_subtract_short
#define atomic_subtract_acq_16 atomic_subtract_acq_short
#define atomic_subtract_rel_16 atomic_subtract_rel_short
#define atomic_load_acq_16 atomic_load_acq_short
#define atomic_store_rel_16 atomic_store_rel_short

/* Operations on 32-bit double words. */
#define atomic_set_32 atomic_set_int
#define atomic_set_acq_32 atomic_set_acq_int
#define atomic_set_rel_32 atomic_set_rel_int
#define atomic_clear_32 atomic_clear_int
#define atomic_clear_acq_32 atomic_clear_acq_int
#define atomic_clear_rel_32 atomic_clear_rel_int
#define atomic_add_32 atomic_add_int
#define atomic_add_acq_32 atomic_add_acq_int
#define atomic_add_rel_32 atomic_add_rel_int
#define atomic_subtract_32 atomic_subtract_int
#define atomic_subtract_acq_32 atomic_subtract_acq_int
#define atomic_subtract_rel_32 atomic_subtract_rel_int
#define atomic_load_acq_32 atomic_load_acq_int
#define atomic_store_rel_32 atomic_store_rel_int
#define atomic_cmpset_32 atomic_cmpset_int
#define atomic_cmpset_acq_32 atomic_cmpset_acq_int
#define atomic_cmpset_rel_32 atomic_cmpset_rel_int
#define atomic_swap_32 atomic_swap_int
#define atomic_readandclear_32 atomic_readandclear_int
#define atomic_fetchadd_32 atomic_fetchadd_int
#define atomic_testandset_32 atomic_testandset_int

/* Operations on pointers. */
#define atomic_set_ptr(p, v) \
	atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define atomic_set_acq_ptr(p, v) \
	atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_set_rel_ptr(p, v) \
	atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_ptr(p, v) \
	atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_acq_ptr(p, v) \
	atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_rel_ptr(p, v) \
	atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_ptr(p, v) \
	atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_acq_ptr(p, v) \
	atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_rel_ptr(p, v) \
	atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_ptr(p, v) \
	atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_acq_ptr(p, v) \
	atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_rel_ptr(p, v) \
	atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_load_acq_ptr(p) \
	atomic_load_acq_int((volatile u_int *)(p))
#define atomic_store_rel_ptr(p, v) \
	atomic_store_rel_int((volatile u_int *)(p), (v))
#define atomic_cmpset_ptr(dst, old, new) \
	atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define atomic_cmpset_acq_ptr(dst, old, new) \
	atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define atomic_cmpset_rel_ptr(dst, old, new) \
	atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
	    (u_int)(new))
#define atomic_swap_ptr(p, v) \
	atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define atomic_readandclear_ptr(p) \
	atomic_readandclear_int((volatile u_int *)(p))

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */