/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)        (*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)      (*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)        (*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)   (*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)       (*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)     (*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)       (*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)  (*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)         (*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)       (*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)         (*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)    (*(u_int *)(P) -= (V))
 * atomic_readandclear_int(P)   (tmp = *(u_int *)(P); *(u_int *)(P) = 0; return (tmp);)
 *
 * atomic_set_long(P, V)        (*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)      (*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)        (*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)   (*(u_long *)(P) -= (V))
 * atomic_readandclear_long(P)  (tmp = *(u_long *)(P); *(u_long *)(P) = 0; return (tmp);)
 */
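
/*
 * Illustrative sketch (not part of the original header): typical use of
 * the plain operations above on a flags word.  The "softc" structure and
 * the SC_RUNNING flag are hypothetical names.
 *
 *      struct softc {
 *              volatile u_int  sc_flags;
 *      };
 *      #define SC_RUNNING      0x0001
 *
 *      atomic_set_int(&sc->sc_flags, SC_RUNNING);      set the bit
 *      atomic_clear_int(&sc->sc_flags, SC_RUNNING);    clear the bit
 */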

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)                     \
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int     atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
u_int   atomic_fetchadd_int(volatile u_int *p, u_int v);

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)                       \
u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p);          \
void    atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED        "lock ; "
#else
#define MPLOCKED
#endif

/*
 * The assembly is declared volatile to mark the potential before-and-after
 * side effects should an interrupt or an SMP collision occur.
 */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)             \
static __inline void                                    \
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(MPLOCKED OP                    \
        : "=m" (*p)                                     \
        : CONS (V), "m" (*p));                          \
}                                                       \
struct __hack

/*
 * Atomic compare and set, used by the mutex functions.
 *
 *      if (*dst == exp) *dst = src (all operands are 32-bit words)
 *
 * Returns 0 on failure, non-zero on success.
 */

#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
        u_char res;

        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       cmpl    %3,%4 ;         "
        "       jne     1f ;            "
        "       movl    %2,%1 ;         "
        "1:                             "
        "       sete    %0 ;            "
        "       popfl ;                 "
        "# atomic_cmpset_int"
        : "=q" (res),                   /* 0 */
          "=m" (*dst)                   /* 1 */
        : "r" (src),                    /* 2 */
          "r" (exp),                    /* 3 */
          "m" (*dst)                    /* 4 */
        : "memory");

        return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       cmpxchgl %2,%1 ;        "
        "       sete    %0 ;            "
        "# atomic_cmpset_int"
        : "=a" (res),                   /* 0 */
          "=m" (*dst)                   /* 1 */
        : "r" (src),                    /* 2 */
          "a" (exp),                    /* 3 */
          "m" (*dst)                    /* 4 */
        : "memory");

        return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */
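
/*
 * Illustrative sketch (not part of the original header): a minimal
 * busy-wait lock built on atomic_cmpset_int(), assuming a hypothetical
 * lock word where 0 means free and 1 means held.  The release uses
 * atomic_store_rel_int(), defined further below.
 *
 *      static volatile u_int lock_word = 0;
 *
 *      while (atomic_cmpset_int(&lock_word, 0, 1) == 0)
 *              ;                               spin until 0 -> 1 succeeds
 *      ... critical section ...
 *      atomic_store_rel_int(&lock_word, 0);    release the lock
 */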

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

        __asm __volatile(
        "       " MPLOCKED "            "
        "       xaddl   %0, %1 ;        "
        "# atomic_fetchadd_int"
        : "+r" (v),                     /* 0 (result) */
          "=m" (*p)                     /* 1 */
        : "m" (*p));                    /* 2 */

        return (v);
}
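
/*
 * Illustrative sketch (not part of the original header): because
 * atomic_fetchadd_int() returns the value *before* the addition, it can
 * hand out unique, monotonically increasing IDs.  The names below are
 * hypothetical:
 *
 *      static volatile u_int next_id = 0;
 *
 *      static __inline u_int
 *      alloc_id(void)
 *      {
 *              return (atomic_fetchadd_int(&next_id, 1));
 *      }
 */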

#if defined(_KERNEL) && !defined(SMP)

/*
 * We assume that a = b will do atomic loads and stores.  However, on a
 * PentiumPro or higher, reads may pass writes, so for that case we have
 * to use a serializing instruction (i.e. with LOCK) to do the load in
 * SMP kernels.  For UP kernels, however, the cache of the single processor
 * is always consistent, so we don't need any memory barriers.
 */
#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)               \
static __inline u_##TYPE                                \
atomic_load_acq_##TYPE(volatile u_##TYPE *p)            \
{                                                       \
        return (*p);                                    \
}                                                       \
                                                        \
static __inline void                                    \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        *p = v;                                         \
}                                                       \
struct __hack

#else /* !(_KERNEL && !SMP) */

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)               \
static __inline u_##TYPE                                \
atomic_load_acq_##TYPE(volatile u_##TYPE *p)            \
{                                                       \
        u_##TYPE res;                                   \
                                                        \
        __asm __volatile(MPLOCKED LOP                   \
        : "=a" (res),                   /* 0 */         \
          "=m" (*p)                     /* 1 */         \
        : "m" (*p)                      /* 2 */         \
        : "memory");                                    \
                                                        \
        return (res);                                   \
}                                                       \
                                                        \
/*                                                      \
 * The XCHG instruction asserts LOCK automagically.     \
 */                                                     \
static __inline void                                    \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(SOP                            \
        : "=m" (*p),                    /* 0 */         \
          "+r" (v)                      /* 1 */         \
        : "m" (*p));                    /* 2 */         \
}                                                       \
struct __hack

#endif /* _KERNEL && !SMP */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,      char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,      char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,      short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,      short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,      int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,      int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,      long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,      long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_STORE_LOAD(char,  "cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short, "cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,   "cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long,  "cmpxchgl %0,%1",  "xchgl %1,%0");
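
/*
 * Illustrative sketch (not part of the original header): the acquire and
 * release variants order surrounding memory accesses.  In the
 * hypothetical producer/consumer below, the release store makes "data"
 * visible no later than "ready", and the acquire load reads "ready"
 * before "data":
 *
 *      static volatile u_int data, ready;
 *
 *      Producer:
 *              data = 42;
 *              atomic_store_rel_int(&ready, 1);
 *
 *      Consumer:
 *              while (atomic_load_acq_int(&ready) == 0)
 *                      ;
 *              consume data, guaranteed to observe 42
 */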

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD

#ifndef WANT_FUNCTIONS

/*
 * On i386, u_long and u_int are both 32 bits wide, so the long variants
 * can safely delegate to the int implementations.
 */
static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src)
{

        return (atomic_cmpset_int((volatile u_int *)dst, (u_int)exp,
            (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

        return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

/* Read the current value and store a zero in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
        u_int res;

        res = 0;
        __asm __volatile(
        "       xchgl   %1,%0 ;         "
        "# atomic_readandclear_int"
        : "+r" (res),                   /* 0 */
          "=m" (*addr)                  /* 1 */
        : "m" (*addr));

        return (res);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
        u_long res;

        res = 0;
        __asm __volatile(
        "       xchgl   %1,%0 ;         "
        "# atomic_readandclear_long"
        : "+r" (res),                   /* 0 */
          "=m" (*addr)                  /* 1 */
        : "m" (*addr));

        return (res);
}

#else /* !__GNUCLIKE_ASM */

u_int   atomic_readandclear_int(volatile u_int *addr);
u_long  atomic_readandclear_long(volatile u_long *addr);

#endif /* __GNUCLIKE_ASM */
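
/*
 * Illustrative sketch (not part of the original header): the
 * read-and-clear operations drain a word in one step, e.g. a mask of
 * pending events.  The "pending_events" name is hypothetical:
 *
 *      static volatile u_int pending_events;
 *
 *      u_int events;
 *
 *      events = atomic_readandclear_int(&pending_events);
 *      process each bit set in "events"; events posted after the
 *      xchg land in pending_events and are picked up next time
 */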

/* Acquire and release variants are identical to the normal ones. */
#define atomic_set_acq_char             atomic_set_char
#define atomic_set_rel_char             atomic_set_char
#define atomic_clear_acq_char           atomic_clear_char
#define atomic_clear_rel_char           atomic_clear_char
#define atomic_add_acq_char             atomic_add_char
#define atomic_add_rel_char             atomic_add_char
#define atomic_subtract_acq_char        atomic_subtract_char
#define atomic_subtract_rel_char        atomic_subtract_char

#define atomic_set_acq_short            atomic_set_short
#define atomic_set_rel_short            atomic_set_short
#define atomic_clear_acq_short          atomic_clear_short
#define atomic_clear_rel_short          atomic_clear_short
#define atomic_add_acq_short            atomic_add_short
#define atomic_add_rel_short            atomic_add_short
#define atomic_subtract_acq_short       atomic_subtract_short
#define atomic_subtract_rel_short       atomic_subtract_short

#define atomic_set_acq_int              atomic_set_int
#define atomic_set_rel_int              atomic_set_int
#define atomic_clear_acq_int            atomic_clear_int
#define atomic_clear_rel_int            atomic_clear_int
#define atomic_add_acq_int              atomic_add_int
#define atomic_add_rel_int              atomic_add_int
#define atomic_subtract_acq_int         atomic_subtract_int
#define atomic_subtract_rel_int         atomic_subtract_int
#define atomic_cmpset_acq_int           atomic_cmpset_int
#define atomic_cmpset_rel_int           atomic_cmpset_int

#define atomic_set_acq_long             atomic_set_long
#define atomic_set_rel_long             atomic_set_long
#define atomic_clear_acq_long           atomic_clear_long
#define atomic_clear_rel_long           atomic_clear_long
#define atomic_add_acq_long             atomic_add_long
#define atomic_add_rel_long             atomic_add_long
#define atomic_subtract_acq_long        atomic_subtract_long
#define atomic_subtract_rel_long        atomic_subtract_long
#define atomic_cmpset_acq_long          atomic_cmpset_long
#define atomic_cmpset_rel_long          atomic_cmpset_long

/* Operations on 8-bit bytes. */
#define atomic_set_8            atomic_set_char
#define atomic_set_acq_8        atomic_set_acq_char
#define atomic_set_rel_8        atomic_set_rel_char
#define atomic_clear_8          atomic_clear_char
#define atomic_clear_acq_8      atomic_clear_acq_char
#define atomic_clear_rel_8      atomic_clear_rel_char
#define atomic_add_8            atomic_add_char
#define atomic_add_acq_8        atomic_add_acq_char
#define atomic_add_rel_8        atomic_add_rel_char
#define atomic_subtract_8       atomic_subtract_char
#define atomic_subtract_acq_8   atomic_subtract_acq_char
#define atomic_subtract_rel_8   atomic_subtract_rel_char
#define atomic_load_acq_8       atomic_load_acq_char
#define atomic_store_rel_8      atomic_store_rel_char

/* Operations on 16-bit words. */
#define atomic_set_16           atomic_set_short
#define atomic_set_acq_16       atomic_set_acq_short
#define atomic_set_rel_16       atomic_set_rel_short
#define atomic_clear_16         atomic_clear_short
#define atomic_clear_acq_16     atomic_clear_acq_short
#define atomic_clear_rel_16     atomic_clear_rel_short
#define atomic_add_16           atomic_add_short
#define atomic_add_acq_16       atomic_add_acq_short
#define atomic_add_rel_16       atomic_add_rel_short
#define atomic_subtract_16      atomic_subtract_short
#define atomic_subtract_acq_16  atomic_subtract_acq_short
#define atomic_subtract_rel_16  atomic_subtract_rel_short
#define atomic_load_acq_16      atomic_load_acq_short
#define atomic_store_rel_16     atomic_store_rel_short

/* Operations on 32-bit double words. */
#define atomic_set_32           atomic_set_int
#define atomic_set_acq_32       atomic_set_acq_int
#define atomic_set_rel_32       atomic_set_rel_int
#define atomic_clear_32         atomic_clear_int
#define atomic_clear_acq_32     atomic_clear_acq_int
#define atomic_clear_rel_32     atomic_clear_rel_int
#define atomic_add_32           atomic_add_int
#define atomic_add_acq_32       atomic_add_acq_int
#define atomic_add_rel_32       atomic_add_rel_int
#define atomic_subtract_32      atomic_subtract_int
#define atomic_subtract_acq_32  atomic_subtract_acq_int
#define atomic_subtract_rel_32  atomic_subtract_rel_int
#define atomic_load_acq_32      atomic_load_acq_int
#define atomic_store_rel_32     atomic_store_rel_int
#define atomic_cmpset_32        atomic_cmpset_int
#define atomic_cmpset_acq_32    atomic_cmpset_acq_int
#define atomic_cmpset_rel_32    atomic_cmpset_rel_int
#define atomic_readandclear_32  atomic_readandclear_int
#define atomic_fetchadd_32      atomic_fetchadd_int

/* Operations on pointers. */
#define atomic_set_ptr(p, v) \
        atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define atomic_set_acq_ptr(p, v) \
        atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_set_rel_ptr(p, v) \
        atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_ptr(p, v) \
        atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_acq_ptr(p, v) \
        atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_rel_ptr(p, v) \
        atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_ptr(p, v) \
        atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_acq_ptr(p, v) \
        atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_rel_ptr(p, v) \
        atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_ptr(p, v) \
        atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_acq_ptr(p, v) \
        atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_rel_ptr(p, v) \
        atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_load_acq_ptr(p) \
        atomic_load_acq_int((volatile u_int *)(p))
#define atomic_store_rel_ptr(p, v) \
        atomic_store_rel_int((volatile u_int *)(p), (v))
#define atomic_cmpset_ptr(dst, old, new) \
        atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define atomic_cmpset_acq_ptr(dst, old, new) \
        atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
            (u_int)(new))
#define atomic_cmpset_rel_ptr(dst, old, new) \
        atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
            (u_int)(new))
#define atomic_readandclear_ptr(p) \
        atomic_readandclear_int((volatile u_int *)(p))
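
/*
 * Illustrative sketch (not part of the original header): a lock-free
 * LIFO push built on atomic_cmpset_ptr().  The "node" structure and
 * "head" list are hypothetical; note that a matching lock-free pop
 * would additionally have to cope with the ABA problem.
 *
 *      struct node {
 *              struct node     *n_next;
 *      };
 *      static struct node * volatile head;
 *
 *      static __inline void
 *      push(struct node *n)
 *      {
 *              do {
 *                      n->n_next = head;
 *              } while (atomic_cmpset_ptr(&head, n->n_next, n) == 0);
 *      }
 */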

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */