/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_LINUXKPI_ASM_ATOMIC_H_
#define	_LINUXKPI_ASM_ATOMIC_H_

#include <linux/compiler.h>
#include <sys/types.h>
#include <machine/atomic.h>

#define	ATOMIC_INIT(x)	{ .counter = (x) }

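/*
 * Counterpart of Linux's atomic_t: a 32-bit counter accessed only
 * through the operations defined in this header.
 */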
typedef struct {
	volatile int counter;
} atomic_t;

/*------------------------------------------------------------------------*
 *	32-bit atomic operations
 *------------------------------------------------------------------------*/

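/*
 * Convenience wrappers expressed in terms of the value-returning
 * primitives below.  Linux's atomic_add() and atomic_sub() return
 * void, so mapping them to the _return variants is harmless; callers
 * simply discard the result.
 */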
#define	atomic_add(i, v)		atomic_add_return((i), (v))
#define	atomic_sub(i, v)		atomic_sub_return((i), (v))
#define	atomic_inc_return(v)		atomic_add_return(1, (v))
#define	atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
#define	atomic_add_and_test(i, v)	(atomic_add_return((i), (v)) == 0)
#define	atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define	atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define	atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define	atomic_dec_return(v)		atomic_sub_return(1, (v))
#define	atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

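/*
 * atomic_fetchadd_int(9) returns the counter value from *before* the
 * addition, whereas Linux's atomic_add_return() must return the value
 * after it; hence both functions below adjust the result by "i".
 */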
static inline int
atomic_add_return(int i, atomic_t *v)
{
	return i + atomic_fetchadd_int(&v->counter, i);
}

static inline int
atomic_sub_return(int i, atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -i) - i;
}

static inline void
atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

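/*
 * Store with release semantics: all loads and stores issued before
 * this call are ordered ahead of the store to the counter.
 */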
static inline void
atomic_set_release(atomic_t *v, int i)
{
	atomic_store_rel_int(&v->counter, i);
}

static inline void
atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_set_int(&v->counter, mask);
}

static inline int
atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}

static inline int
atomic_inc(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, 1) + 1;
}

static inline int
atomic_dec(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -1) - 1;
}

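/*
 * Atomically add "a" to the counter unless its current value is "u".
 * Returns non-zero iff the addition was performed.  On failure,
 * atomic_fcmpset_int(9) reloads "c" with the observed value, so each
 * pass of the loop re-checks it against "u".
 */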
static inline int
atomic_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	for (;;) {
		if (unlikely(c == u))
			break;
		if (likely(atomic_fcmpset_int(&v->counter, &c, c + a)))
			break;
	}
	return (c != u);
}

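/*
 * Same loop as atomic_add_unless(), but returns the counter value
 * observed before the addition ("u" when no addition took place).
 */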
static inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	for (;;) {
		if (unlikely(c == u))
			break;
		if (likely(atomic_fcmpset_int(&v->counter, &c, c + a)))
			break;
	}
	return (c);
}

static inline void
atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_clear_int(&v->counter, mask);
}

static inline int
atomic_xchg(atomic_t *v, int i)
{
	return (atomic_swap_int(&v->counter, i));
}

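/*
 * Linux's atomic_cmpxchg() returns the value observed before the
 * exchange rather than a success flag.  Since atomic_fcmpset_int(9)
 * may fail spuriously on some architectures, the loop retries as long
 * as the value it reports back still equals "old".
 */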
static inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret = old;

	for (;;) {
		if (atomic_fcmpset_int(&v->counter, &ret, new))
			break;
		if (ret != old)
			break;
	}
	return (ret);
}

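/*
 * LINUXKPI_ATOMIC_{8,16,64}() expand their argument only on
 * architectures where the corresponding fixed-width atomic primitives
 * are available; elsewhere the wrapped case label is compiled out.
 */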
#if defined(__amd64__) || defined(__arm64__) || defined(__i386__)
#define	LINUXKPI_ATOMIC_8(...) __VA_ARGS__
#define	LINUXKPI_ATOMIC_16(...) __VA_ARGS__
#else
#define	LINUXKPI_ATOMIC_8(...)
#define	LINUXKPI_ATOMIC_16(...)
#endif

#if !(defined(__i386__) || (defined(__powerpc__) && !defined(__powerpc64__)))
#define	LINUXKPI_ATOMIC_64(...) __VA_ARGS__
#else
#define	LINUXKPI_ATOMIC_64(...)
#endif

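/*
 * Type-generic compare-and-exchange.  The unions overlay the operand
 * type with fixed-width views so a single macro can service 1-, 2-,
 * 4- and 8-byte objects, and the CTASSERT() rejects any other size at
 * compile time.  As in atomic_cmpxchg() above, each fcmpset loop
 * retries only while the observed value still equals "old", i.e. on
 * spurious failure.
 */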
#define	cmpxchg(ptr, old, new) ({					\
	union {								\
		__typeof(*(ptr)) val;					\
		u8 u8[0];						\
		u16 u16[0];						\
		u32 u32[0];						\
		u64 u64[0];						\
	} __ret = { .val = (old) }, __new = { .val = (new) };		\
									\
	CTASSERT(							\
	    LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||)		\
	    LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||)		\
	    LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||)		\
	    sizeof(__ret.val) == 4);					\
									\
	switch (sizeof(__ret.val)) {					\
	LINUXKPI_ATOMIC_8(						\
	case 1:								\
		while (!atomic_fcmpset_8((volatile u8 *)(ptr),		\
		    __ret.u8, __new.u8[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	LINUXKPI_ATOMIC_16(						\
	case 2:								\
		while (!atomic_fcmpset_16((volatile u16 *)(ptr),	\
		    __ret.u16, __new.u16[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	case 4:								\
		while (!atomic_fcmpset_32((volatile u32 *)(ptr),	\
		    __ret.u32, __new.u32[0]) && __ret.val == (old))	\
			;						\
		break;							\
	LINUXKPI_ATOMIC_64(						\
	case 8:								\
		while (!atomic_fcmpset_64((volatile u64 *)(ptr),	\
		    __ret.u64, __new.u64[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	}								\
	__ret.val;							\
})
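
/*
 * Illustrative use only; the variable name and values below are made
 * up.  Because cmpxchg() evaluates to the value observed at *(ptr),
 * success is detected by comparing the result against the expected
 * old value:
 *
 *	static u32 lock_word;
 *
 *	if (cmpxchg(&lock_word, 0, 1) == 0)
 *		...the caller won the 0 -> 1 transition...
 */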

#define	cmpxchg64(...)		cmpxchg(__VA_ARGS__)
#define	cmpxchg_relaxed(...)	cmpxchg(__VA_ARGS__)

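/*
 * Type-generic unconditional exchange.  The 4- and 8-byte cases map
 * directly onto atomic_swap_32()/atomic_swap_64(); the 1- and 2-byte
 * cases fall back to an fcmpset loop instead of a native swap.
 */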
#define	xchg(ptr, new) ({						\
	union {								\
		__typeof(*(ptr)) val;					\
		u8 u8[0];						\
		u16 u16[0];						\
		u32 u32[0];						\
		u64 u64[0];						\
	} __ret, __new = { .val = (new) };				\
									\
	CTASSERT(							\
	    LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||)		\
	    LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||)		\
	    LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||)		\
	    sizeof(__ret.val) == 4);					\
									\
	switch (sizeof(__ret.val)) {					\
	LINUXKPI_ATOMIC_8(						\
	case 1:								\
		__ret.val = READ_ONCE(*ptr);				\
		while (!atomic_fcmpset_8((volatile u8 *)(ptr),		\
		    __ret.u8, __new.u8[0]))				\
			;						\
		break;							\
	)								\
	LINUXKPI_ATOMIC_16(						\
	case 2:								\
		__ret.val = READ_ONCE(*ptr);				\
		while (!atomic_fcmpset_16((volatile u16 *)(ptr),	\
		    __ret.u16, __new.u16[0]))				\
			;						\
		break;							\
	)								\
	case 4:								\
		__ret.u32[0] = atomic_swap_32((volatile u32 *)(ptr),	\
		    __new.u32[0]);					\
		break;							\
	LINUXKPI_ATOMIC_64(						\
	case 8:								\
		__ret.u64[0] = atomic_swap_64((volatile u64 *)(ptr),	\
		    __new.u64[0]);					\
		break;							\
	)								\
	}								\
	__ret.val;							\
})

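/*
 * Linux's try_cmpxchg(): install "n" if *(p) matches the expected
 * value at *(op), and evaluate to true on success.  On failure the
 * value actually observed is written back through "op" so the caller
 * can retry without reloading.  try_cmpxchg() uses the compiler's
 * __sync_val_compare_and_swap() builtin, while the __atomic_ variant
 * below is built on the atomic_cmpxchg*() family.
 */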
#define	try_cmpxchg(p, op, n)						\
({									\
	__typeof(p) __op = (__typeof((p)))(op);				\
	__typeof(*(p)) __o = *__op;					\
	__typeof(*(p)) __p = __sync_val_compare_and_swap((p), (__o), (n)); \
	if (__p != __o)							\
		*__op = __p;						\
	(__p == __o);							\
})

#define	__atomic_try_cmpxchg(type, _p, _po, _n)				\
({									\
	__typeof(_po) __po = (_po);					\
	__typeof(*(_po)) __r, __o = *__po;				\
	__r = atomic_cmpxchg##type((_p), __o, (_n));			\
	if (unlikely(__r != __o))					\
		*__po = __r;						\
	likely(__r == __o);						\
})

#define	atomic_try_cmpxchg(_p, _po, _n)	__atomic_try_cmpxchg(, _p, _po, _n)

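/*
 * Decrement the counter unless the result would be negative.  Returns
 * the decremented value; a negative return value means the counter was
 * left unchanged.
 */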
static inline int
atomic_dec_if_positive(atomic_t *v)
{
	int retval;
	int old;

	old = atomic_read(v);
	for (;;) {
		retval = old - 1;
		if (unlikely(retval < 0))
			break;
		if (likely(atomic_fcmpset_int(&v->counter, &old, retval)))
			break;
	}
	return (retval);
}

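/*
 * Generators for the atomic bitwise operations.  LINUX_ATOMIC_OP()
 * emits a void atomic_<op>(), and LINUX_ATOMIC_FETCH_OP() emits an
 * atomic_fetch_<op>() returning the value observed before the
 * operation; both retry a cmpxchg loop with the freshly observed
 * value until the swap succeeds.
 */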
#define	LINUX_ATOMIC_OP(op, c_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	int c, old;							\
									\
	c = v->counter;							\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)		\
		c = old;						\
}

#define	LINUX_ATOMIC_FETCH_OP(op, c_op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int c, old;							\
									\
	c = v->counter;							\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)		\
		c = old;						\
									\
	return (c);							\
}

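/*
 * Returns the counter value from before the increment.
 */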
static inline int
atomic_fetch_inc(atomic_t *v)
{

	return ((atomic_inc_return(v) - 1));
}

LINUX_ATOMIC_OP(or, |)
LINUX_ATOMIC_OP(and, &)
LINUX_ATOMIC_OP(andnot, &~)
LINUX_ATOMIC_OP(xor, ^)

LINUX_ATOMIC_FETCH_OP(or, |)
LINUX_ATOMIC_FETCH_OP(and, &)
LINUX_ATOMIC_FETCH_OP(andnot, &~)
LINUX_ATOMIC_FETCH_OP(xor, ^)

#endif	/* _LINUXKPI_ASM_ATOMIC_H_ */