1 /*-
2 * Copyright (c) 1998 Doug Rabson.
3 * Copyright (c) 2001 Jake Burkholder.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * from: FreeBSD: src/sys/i386/include/atomic.h,v 1.20 2001/02/11
28 * $FreeBSD: releng/10.1/sys/sparc64/include/atomic.h 253994 2013-08-06 15:34:11Z marius $
29 */
30
31 #ifndef _MACHINE_ATOMIC_H_
32 #define _MACHINE_ATOMIC_H_
33
34 #include <machine/cpufunc.h>
35
/*
 * Memory barriers.  membar #MemIssue orders all memory operations issued
 * before the barrier ahead of all issued after it.  The write-only (wmb)
 * and read-only (rmb) variants are conservatively implemented as the
 * full barrier.
 */
#define mb() __asm__ __volatile__ ("membar #MemIssue": : :"memory")
#define wmb() mb()
#define rmb() mb()

/*
 * Userland needs different ASI's: the kernel runs with the nucleus
 * address space (ASI_N), userland with the primary one (ASI_P) --
 * NOTE(review): the ASI_N/ASI_P values themselves come from headers not
 * visible here; confirm against machine/asi.h.
 */
#ifdef _KERNEL
#define __ASI_ATOMIC ASI_N
#else
#define __ASI_ATOMIC ASI_P
#endif
46
47 /*
48 * Various simple arithmetic on memory which is atomic in the presence
49 * of interrupts and multiple processors. See atomic(9) for details.
50 * Note that efficient hardware support exists only for the 32 and 64
51 * bit variants; the 8 and 16 bit versions are not provided and should
52 * not be used in MI code.
53 *
54 * This implementation takes advantage of the fact that the sparc64
55 * cas instruction is both a load and a store. The loop is often coded
56 * as follows:
57 *
58 * do {
59 * expect = *p;
60 * new = expect + 1;
61 * } while (cas(p, expect, new) != expect);
62 *
63 * which performs an unnnecessary load on each iteration that the cas
64 * operation fails. Modified as follows:
65 *
66 * expect = *p;
67 * for (;;) {
68 * new = expect + 1;
69 * result = cas(p, expect, new);
70 * if (result == expect)
71 * break;
72 * expect = result;
73 * }
74 *
75 * the return value of cas is used to avoid the extra reload.
76 *
77 * We only include a memory barrier in the rel variants as in total store
78 * order which we use for running the kernel and all of the userland atomic
79 * loads and stores behave as if the were followed by a membar with a mask
80 * of #LoadLoad | #LoadStore | #StoreStore. In order to be also sufficient
81 * for use of relaxed memory ordering, the atomic_cas() in the acq variants
82 * additionally would have to be followed by a membar #LoadLoad | #LoadStore.
83 * Due to the suggested assembly syntax of the membar operands containing a
84 * # character, they cannot be used in macros. The cmask and mmask bits thus
85 * are hard coded in machine/cpufunc.h and used here through macros.
86 * Hopefully the bit numbers won't change in the future.
87 */
88
/* Expand to the matching fixed-width integer type: itype(32) -> uint32_t. */
#define itype(sz) uint ## sz ## _t

/*
 * Hardware compare-and-swap primitives.  casa/casxa (declared in
 * machine/cpufunc.h) atomically compare *(p) with (e) and store (s) if
 * equal, returning the value previously at *(p).
 */
#define atomic_cas_32(p, e, s) casa((p), (e), (s), __ASI_ATOMIC)
#define atomic_cas_64(p, e, s) casxa((p), (e), (s), __ASI_ATOMIC)

/* Size-dispatched CAS; sz must be the literal token 32 or 64. */
#define atomic_cas(p, e, s, sz) \
 atomic_cas_ ## sz((p), (e), (s))
96
/*
 * CAS with acquire semantics.  Under TSO only a compiler barrier is
 * needed after the CAS; see the big comment above for why no hardware
 * membar is emitted here.
 */
#define atomic_cas_acq(p, e, s, sz) ({ \
 itype(sz) v; \
 v = atomic_cas((p), (e), (s), sz); \
 __compiler_membar(); \
 v; \
})

/*
 * CAS with release semantics: a membar #LoadStore | #StoreStore before
 * the CAS makes all prior memory operations visible first.
 */
#define atomic_cas_rel(p, e, s, sz) ({ \
 itype(sz) v; \
 membar(LoadStore | StoreStore); \
 v = atomic_cas((p), (e), (s), sz); \
 v; \
})
110
/*
 * Generic atomic read-modify-write: atomically perform *(p) = *(p) op (v)
 * and evaluate to the PREVIOUS value of *(p).  Uses the CAS-retry loop
 * described in the comment above: the value returned by a failed CAS
 * seeds the next attempt, avoiding a reload from memory.
 */
#define atomic_op(p, op, v, sz) ({ \
 itype(sz) e, r, s; \
 for (e = *(volatile itype(sz) *)(p);; e = r) { \
 s = e op (v); \
 r = atomic_cas_ ## sz((p), e, s); \
 if (r == e) \
 break; \
 } \
 e; \
})

/* atomic_op with acquire semantics (compiler barrier after; see above). */
#define atomic_op_acq(p, op, v, sz) ({ \
 itype(sz) t; \
 t = atomic_op((p), op, (v), sz); \
 __compiler_membar(); \
 t; \
})

/* atomic_op with release semantics (hardware membar before; see above). */
#define atomic_op_rel(p, op, v, sz) ({ \
 itype(sz) t; \
 membar(LoadStore | StoreStore); \
 t = atomic_op((p), op, (v), sz); \
 t; \
})
135
/*
 * Atomic load with acquire semantics, implemented as CAS(p, 0, 0): if
 * *(p) is 0 it is harmlessly rewritten as 0, otherwise the CAS fails
 * and simply returns the current value.
 */
#define atomic_ld_acq(p, sz) ({ \
 itype(sz) v; \
 v = atomic_cas((p), 0, 0, sz); \
 __compiler_membar(); \
 v; \
})

/*
 * Atomically read *(p) and replace it with 0, evaluating to the value
 * read (read-and-clear).  Same CAS-retry structure as atomic_op().
 */
#define atomic_ld_clear(p, sz) ({ \
 itype(sz) e, r; \
 for (e = *(volatile itype(sz) *)(p);; e = r) { \
 r = atomic_cas((p), e, 0, sz); \
 if (r == e) \
 break; \
 } \
 e; \
})
152
/*
 * Atomic store of (v) into *(p) via the CAS-retry loop; loops until a
 * CAS against the last observed value succeeds.  Statement macro, hence
 * the do/while (0) wrapper.
 */
#define atomic_st(p, v, sz) do { \
 itype(sz) e, r; \
 for (e = *(volatile itype(sz) *)(p);; e = r) { \
 r = atomic_cas((p), e, (v), sz); \
 if (r == e) \
 break; \
 } \
} while (0)

/* Store with acquire semantics: compiler barrier after the store. */
#define atomic_st_acq(p, v, sz) do { \
 atomic_st((p), (v), sz); \
 __compiler_membar(); \
} while (0)

/* Store with release semantics: hardware membar before the store. */
#define atomic_st_rel(p, v, sz) do { \
 membar(LoadStore | StoreStore); \
 atomic_st((p), (v), sz); \
} while (0)
171
/*
 * Generate the full atomic(9) inline-function family for one access
 * width:
 *
 *   name  - suffix of the generated functions (int, 32, long, 64, ptr)
 *   ptype - pointer type of the target operand
 *   vtype - value type returned by loads/CAS and taken by stores
 *   atype - argument type for the arithmetic/bitwise operations
 *   sz    - literal 32 or 64, selecting atomic_cas_32/_64
 *
 * Each operation comes in plain, _acq and _rel flavors built on the
 * helper macros above.  Note the arithmetic/bitwise functions return
 * the value *before* the operation (atomic_op() semantics), and
 * atomic_clear uses "& ~(v)" so (v) names the bits to clear.
 */
#define ATOMIC_GEN(name, ptype, vtype, atype, sz) \
 \
static __inline vtype \
atomic_add_ ## name(volatile ptype p, atype v) \
{ \
 return ((vtype)atomic_op((p), +, (v), sz)); \
} \
static __inline vtype \
atomic_add_acq_ ## name(volatile ptype p, atype v) \
{ \
 return ((vtype)atomic_op_acq((p), +, (v), sz)); \
} \
static __inline vtype \
atomic_add_rel_ ## name(volatile ptype p, atype v) \
{ \
 return ((vtype)atomic_op_rel((p), +, (v), sz)); \
} \
 \
static __inline vtype \
atomic_clear_ ## name(volatile ptype p, atype v) \
{ \
 return ((vtype)atomic_op((p), &, ~(v), sz)); \
} \
static __inline vtype \
atomic_clear_acq_ ## name(volatile ptype p, atype v) \
{ \
 return ((vtype)atomic_op_acq((p), &, ~(v), sz)); \
} \
static __inline vtype \
atomic_clear_rel_ ## name(volatile ptype p, atype v) \
{ \
 return ((vtype)atomic_op_rel((p), &, ~(v), sz)); \
} \
 \
static __inline int \
atomic_cmpset_ ## name(volatile ptype p, vtype e, vtype s) \
{ \
 return (((vtype)atomic_cas((p), (e), (s), sz)) == (e)); \
} \
static __inline int \
atomic_cmpset_acq_ ## name(volatile ptype p, vtype e, vtype s) \
{ \
 return (((vtype)atomic_cas_acq((p), (e), (s), sz)) == (e)); \
} \
static __inline int \
atomic_cmpset_rel_ ## name(volatile ptype p, vtype e, vtype s) \
{ \
 return (((vtype)atomic_cas_rel((p), (e), (s), sz)) == (e)); \
} \
 \
static __inline vtype \
atomic_load_ ## name(volatile ptype p) \
{ \
 return ((vtype)atomic_cas((p), 0, 0, sz)); \
} \
static __inline vtype \
atomic_load_acq_ ## name(volatile ptype p) \
{ \
 return ((vtype)atomic_cas_acq((p), 0, 0, sz)); \
} \
 \
static __inline vtype \
atomic_readandclear_ ## name(volatile ptype p) \
{ \
 return ((vtype)atomic_ld_clear((p), sz)); \
} \
 \
static __inline vtype \
atomic_set_ ## name(volatile ptype p, atype v) \
{ \
 return ((vtype)atomic_op((p), |, (v), sz)); \
} \
static __inline vtype \
atomic_set_acq_ ## name(volatile ptype p, atype v) \
{ \
 return ((vtype)atomic_op_acq((p), |, (v), sz)); \
} \
static __inline vtype \
atomic_set_rel_ ## name(volatile ptype p, atype v) \
{ \
 return ((vtype)atomic_op_rel((p), |, (v), sz)); \
} \
 \
static __inline vtype \
atomic_subtract_ ## name(volatile ptype p, atype v) \
{ \
 return ((vtype)atomic_op((p), -, (v), sz)); \
} \
static __inline vtype \
atomic_subtract_acq_ ## name(volatile ptype p, atype v) \
{ \
 return ((vtype)atomic_op_acq((p), -, (v), sz)); \
} \
static __inline vtype \
atomic_subtract_rel_ ## name(volatile ptype p, atype v) \
{ \
 return ((vtype)atomic_op_rel((p), -, (v), sz)); \
} \
 \
static __inline void \
atomic_store_acq_ ## name(volatile ptype p, vtype v) \
{ \
 atomic_st_acq((p), (v), sz); \
} \
static __inline void \
atomic_store_rel_ ## name(volatile ptype p, vtype v) \
{ \
 atomic_st_rel((p), (v), sz); \
}
281
/*
 * Instantiate the public API for each supported width.  "int" and "32"
 * share the 32-bit implementation; "long", "64" and "ptr" share the
 * 64-bit one (sparc64 is LP64, so long and pointers are 64 bits wide).
 */
ATOMIC_GEN(int, u_int *, u_int, u_int, 32);
ATOMIC_GEN(32, uint32_t *, uint32_t, uint32_t, 32);

ATOMIC_GEN(long, u_long *, u_long, u_long, 64);
ATOMIC_GEN(64, uint64_t *, uint64_t, uint64_t, 64);

ATOMIC_GEN(ptr, uintptr_t *, uintptr_t, uintptr_t, 64);

/*
 * atomic_op() already returns the value previously stored, so the add
 * functions have fetchadd semantics and can simply be aliased.
 */
#define atomic_fetchadd_int atomic_add_int
#define atomic_fetchadd_32 atomic_add_32
#define atomic_fetchadd_long atomic_add_long

/* Scrub the implementation-private helper macros from the namespace. */
#undef ATOMIC_GEN
#undef atomic_cas
#undef atomic_cas_acq
#undef atomic_cas_rel
#undef atomic_op
#undef atomic_op_acq
#undef atomic_op_rel
#undef atomic_ld_acq
#undef atomic_ld_clear
#undef atomic_st
#undef atomic_st_acq
#undef atomic_st_rel
306
307 #endif /* !_MACHINE_ATOMIC_H_ */