1 /*-
2 * Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD$
27 */
28
29 #ifndef _OPENSOLARIS_SYS_ATOMIC_H_
30 #define _OPENSOLARIS_SYS_ATOMIC_H_
31
32 #include <sys/types.h>
33 #include <machine/atomic.h>
34
35 #if defined(__i386__) && (defined(_KERNEL) || defined(KLD_MODULE))
36 #define I386_HAVE_ATOMIC64
37 #endif
38
39 #if defined(__i386__) || defined(__amd64__) || defined(__arm__)
40 /* No spurious failures from fcmpset. */
41 #define STRONG_FCMPSET
42 #endif
43
44 #if !defined(__LP64__) && !defined(__mips_n32) && \
45 !defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64) && \
46 !defined(HAS_EMULATED_ATOMIC64)
47 extern void atomic_add_64(volatile uint64_t *target, int64_t delta);
48 extern void atomic_dec_64(volatile uint64_t *target);
49 extern uint64_t atomic_swap_64(volatile uint64_t *a, uint64_t value);
50 extern uint64_t atomic_load_64(volatile uint64_t *a);
51 extern uint64_t atomic_add_64_nv(volatile uint64_t *target, int64_t delta);
52 extern uint64_t atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
53 uint64_t newval);
54 #endif
55
56 #define membar_producer atomic_thread_fence_rel
57
/*
 * Solaris-style add-and-fetch: atomically add delta to *p and return the
 * resulting (new) value.  FreeBSD's fetchadd returns the old value, so the
 * delta is re-applied to the snapshot it hands back.
 */
static __inline uint32_t
atomic_add_32_nv(volatile uint32_t *p, int32_t delta)
{
	uint32_t prev;

	prev = atomic_fetchadd_32(p, delta);
	return (prev + delta);
}
63
64 static __inline u_int
65 atomic_add_int_nv(volatile u_int *target, int delta)
66 {
67 return (atomic_add_32_nv(target, delta));
68 }
69
/* Atomically bump *p by one; the resulting value is not reported. */
static __inline void
atomic_inc_32(volatile uint32_t *p)
{
	atomic_add_32(p, 1);
}
75
/* Atomically bump *p by one and return the new (post-increment) value. */
static __inline uint32_t
atomic_inc_32_nv(volatile uint32_t *p)
{
	return (atomic_add_32_nv(p, 1));
}
81
/* Atomically drop *p by one; the resulting value is not reported. */
static __inline void
atomic_dec_32(volatile uint32_t *p)
{
	atomic_subtract_32(p, 1);
}
87
/*
 * Atomically drop *p by one and return the new value.  Implemented as an
 * add of -1 so the fetchadd-based helper can be reused.
 */
static __inline uint32_t
atomic_dec_32_nv(volatile uint32_t *p)
{
	return (atomic_add_32_nv(p, -1));
}
93
/*
 * Solaris-style compare-and-swap: atomically set *target to newval iff
 * *target == cmp, and return the value *target held beforehand (equal to
 * cmp exactly when the swap happened).  Built on FreeBSD's fcmpset, which
 * on failure writes the witnessed value of *target back into cmp.
 */
static inline uint32_t
atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
{
#ifdef STRONG_FCMPSET
	/* fcmpset never fails spuriously on these arches; one try suffices. */
	(void)atomic_fcmpset_32(target, &cmp, newval);
#else
	/*
	 * On LL/SC architectures fcmpset may fail even though the value it
	 * witnessed matched.  Retry while the failure left cmp equal to the
	 * value we asked for (spurious failure); a genuine mismatch makes
	 * cmp != expected and ends the loop.
	 */
	uint32_t expected = cmp;

	do {
		if (atomic_fcmpset_32(target, &cmp, newval))
			break;
	} while (cmp == expected);
#endif
	/* cmp now holds the pre-operation contents of *target. */
	return (cmp);
}
109
110 #if defined(__LP64__) || defined(__mips_n32) || \
111 defined(ARM_HAVE_ATOMIC64) || defined(I386_HAVE_ATOMIC64) || \
112 defined(HAS_EMULATED_ATOMIC64)
/* Atomically drop *p by one; the resulting value is not reported. */
static __inline void
atomic_dec_64(volatile uint64_t *p)
{
	atomic_subtract_64(p, 1);
}
118
/*
 * Solaris-style add-and-fetch: atomically add delta to *p and return the
 * resulting (new) value, reconstructed from fetchadd's old-value snapshot.
 */
static inline uint64_t
atomic_add_64_nv(volatile uint64_t *p, int64_t delta)
{
	uint64_t prev;

	prev = atomic_fetchadd_64(p, delta);
	return (prev + delta);
}
124
/*
 * 64-bit Solaris-style compare-and-swap: atomically set *target to newval
 * iff *target == cmp, returning the prior contents of *target (equal to
 * cmp exactly when the swap happened).  fcmpset writes the witnessed value
 * back into cmp on failure.
 */
static inline uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
{
#ifdef STRONG_FCMPSET
	/* fcmpset never fails spuriously on these arches; one try suffices. */
	(void)atomic_fcmpset_64(target, &cmp, newval);
#else
	/*
	 * Retry spurious LL/SC failures (cmp still equals expected); stop on
	 * a genuine mismatch, which leaves cmp != expected.
	 */
	uint64_t expected = cmp;

	do {
		if (atomic_fcmpset_64(target, &cmp, newval))
			break;
	} while (cmp == expected);
#endif
	/* cmp now holds the pre-operation contents of *target. */
	return (cmp);
}
140 #endif
141
/* Atomically bump *p by one; the resulting value is not reported. */
static __inline void
atomic_inc_64(volatile uint64_t *p)
{
	atomic_add_64(p, 1);
}
147
/* Atomically bump *p by one and return the new (post-increment) value. */
static __inline uint64_t
atomic_inc_64_nv(volatile uint64_t *p)
{
	return (atomic_add_64_nv(p, 1));
}
153
/* Atomically drop *p by one and return the new value (add of -1). */
static __inline uint64_t
atomic_dec_64_nv(volatile uint64_t *p)
{
	return (atomic_add_64_nv(p, -1));
}
159
#if !defined(COMPAT_32BIT) && defined(__LP64__)
/*
 * Pointer-width compare-and-swap: set *target to newval iff it currently
 * equals cmp, returning the prior contents.  Pointers are 64 bits wide
 * here, so defer to atomic_cas_64().
 */
static __inline void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
	volatile uint64_t *tp = (volatile uint64_t *)target;

	return ((void *)atomic_cas_64(tp, (uint64_t)cmp, (uint64_t)newval));
}
#else
/* 32-bit pointers (ILP32 or COMPAT_32BIT): defer to atomic_cas_32(). */
static __inline void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
	volatile uint32_t *tp = (volatile uint32_t *)target;

	return ((void *)atomic_cas_32(tp, (uint32_t)cmp, (uint32_t)newval));
}
#endif /* !defined(COMPAT_32BIT) && defined(__LP64__) */
175
176 #endif /* !_OPENSOLARIS_SYS_ATOMIC_H_ */