/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/5.3/sys/i386/include/atomic.h 126891 2004-03-12 21:45:33Z trhodes $
 */
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors:
 *
 * atomic_set_char(P, V)        (*(u_char*)(P) |= (V))
 * atomic_clear_char(P, V)      (*(u_char*)(P) &= ~(V))
 * atomic_add_char(P, V)        (*(u_char*)(P) += (V))
 * atomic_subtract_char(P, V)   (*(u_char*)(P) -= (V))
 *
 * atomic_set_short(P, V)       (*(u_short*)(P) |= (V))
 * atomic_clear_short(P, V)     (*(u_short*)(P) &= ~(V))
 * atomic_add_short(P, V)       (*(u_short*)(P) += (V))
 * atomic_subtract_short(P, V)  (*(u_short*)(P) -= (V))
 *
 * atomic_set_int(P, V)         (*(u_int*)(P) |= (V))
 * atomic_clear_int(P, V)       (*(u_int*)(P) &= ~(V))
 * atomic_add_int(P, V)         (*(u_int*)(P) += (V))
 * atomic_subtract_int(P, V)    (*(u_int*)(P) -= (V))
 * atomic_readandclear_int(P)   (tmp = *(u_int*)(P); *(u_int*)(P) = 0; return (tmp);)
 *
 * atomic_set_long(P, V)        (*(u_long*)(P) |= (V))
 * atomic_clear_long(P, V)      (*(u_long*)(P) &= ~(V))
 * atomic_add_long(P, V)        (*(u_long*)(P) += (V))
 * atomic_subtract_long(P, V)   (*(u_long*)(P) -= (V))
 * atomic_readandclear_long(P)  (tmp = *(u_long*)(P); *(u_long*)(P) = 0; return (tmp);)
 */
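
/*
 * Usage sketch (illustrative only; the names "my_flags" and MYF_BUSY are
 * hypothetical and not part of this header):
 *
 *      static volatile u_int my_flags;
 *      #define MYF_BUSY 0x01
 *
 *      atomic_set_int(&my_flags, MYF_BUSY);    set the busy bit
 *      atomic_clear_int(&my_flags, MYF_BUSY);  clear the busy bit
 */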

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)                     \
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)                       \
u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p);          \
void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
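
/*
 * For example (an expansion sketch, not additional API), in a module build
 * ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) simply declares the function
 * that is compiled into the kernel proper:
 *
 *      void atomic_add_int(volatile u_int *p, u_int v);
 */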

#else /* !KLD_MODULE */

#if defined(__GNUC__) || defined(__INTEL_COMPILER)

/*
 * For userland, assume the SMP case and use lock prefixes so that
 * the binaries will run on both types of systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED        lock ;
#else
#define MPLOCKED
#endif
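
/*
 * With this definition of MPLOCKED, an operation such as "addl" in the
 * templates below is emitted as "lock ; addl" on SMP (and userland)
 * builds and as a plain "addl" on UP kernels.
 */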

/*
 * The assembly is marked volatile to demarcate potential before-and-after
 * side effects if an interrupt or an SMP collision were to occur.
 */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)             \
static __inline void                                    \
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(__XSTRING(MPLOCKED) OP         \
        : "+m" (*p)                                     \
        : CONS (V));                                    \
}                                                       \
struct __hack
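
/*
 * Expansion sketch (illustrative only): ATOMIC_ASM(add, int,
 * "addl %1,%0", "ir", v), invoked near the bottom of this file,
 * produces roughly:
 *
 *      static __inline void
 *      atomic_add_int(volatile u_int *p, u_int v)
 *      {
 *              __asm __volatile("lock ; addl %1,%0"
 *              : "+m" (*p)
 *              : "ir" (v));
 *      }
 */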

#else /* !(__GNUC__ || __INTEL_COMPILER) */

#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)                     \
extern void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#endif /* __GNUC__ || __INTEL_COMPILER */

/*
 * Atomic compare and set, used by the mutex functions.
 *
 *      if (*dst == exp) *dst = src (all 32-bit words)
 *
 * Returns 0 on failure, non-zero on success.
 */
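
/*
 * Usage sketch (illustrative only; "lk" is a hypothetical lock word, not
 * part of this header).  A minimal spin acquire/release built on cmpset:
 *
 *      static volatile u_int lk;
 *
 *      while (atomic_cmpset_acq_int(&lk, 0, 1) == 0)
 *              ;                               spin until 0 -> 1 succeeds
 *      ... critical section ...
 *      atomic_store_rel_int(&lk, 0);           release the lock
 */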

#if defined(__GNUC__) || defined(__INTEL_COMPILER)

#if defined(I386_CPU) || defined(CPU_DISABLE_CMPXCHG)

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
        int res = exp;

        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       cmpl    %0,%2 ;         "
        "       jne     1f ;            "
        "       movl    %1,%2 ;         "
        "1:                             "
        "       sete    %%al ;          "
        "       movzbl  %%al,%0 ;       "
        "       popfl ;                 "
        "# atomic_cmpset_int"
        : "+a" (res)                    /* 0 (result) */
        : "r" (src),                    /* 1 */
          "m" (*(dst))                  /* 2 */
        : "memory");

        return (res);
}

#else /* !(I386_CPU || CPU_DISABLE_CMPXCHG) */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
        int res = exp;

        __asm __volatile (
        "       " __XSTRING(MPLOCKED) " "
        "       cmpxchgl %1,%2 ;        "
        "       setz    %%al ;          "
        "       movzbl  %%al,%0 ;       "
        "# atomic_cmpset_int"
        : "+a" (res)                    /* 0 (result) */
        : "r" (src),                    /* 1 */
          "m" (*(dst))                  /* 2 */
        : "memory");

        return (res);
}

#endif /* I386_CPU || CPU_DISABLE_CMPXCHG */

#endif /* defined(__GNUC__) || defined(__INTEL_COMPILER) */

#if defined(__GNUC__) || defined(__INTEL_COMPILER)

#if defined(I386_CPU)

/*
 * We assume that a = b will do atomic loads and stores.
 *
 * XXX: This is _NOT_ safe on a P6 or higher because it does not guarantee
 * memory ordering.  These should only be used on a 386.
 */
#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)               \
static __inline u_##TYPE                                \
atomic_load_acq_##TYPE(volatile u_##TYPE *p)            \
{                                                       \
        return (*p);                                    \
}                                                       \
                                                        \
static __inline void                                    \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        *p = v;                                         \
        __asm __volatile("" : : : "memory");            \
}                                                       \
struct __hack

#else /* !defined(I386_CPU) */

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)               \
static __inline u_##TYPE                                \
atomic_load_acq_##TYPE(volatile u_##TYPE *p)            \
{                                                       \
        u_##TYPE res;                                   \
                                                        \
        __asm __volatile(__XSTRING(MPLOCKED) LOP        \
        : "=a" (res),                   /* 0 (result) */\
          "+m" (*p)                     /* 1 */         \
        : : "memory");                                  \
                                                        \
        return (res);                                   \
}                                                       \
                                                        \
/*                                                      \
 * The XCHG instruction asserts LOCK automagically.     \
 */                                                     \
static __inline void                                    \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(SOP                            \
        : "+m" (*p),                    /* 0 */         \
          "+r" (v)                      /* 1 */         \
        : : "memory");                                  \
}                                                       \
struct __hack
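
/*
 * Expansion sketch (illustrative only): ATOMIC_STORE_LOAD(int,
 * "cmpxchgl %0,%1", "xchgl %1,%0") below yields approximately:
 *
 *      static __inline u_int
 *      atomic_load_acq_int(volatile u_int *p)
 *      {
 *              u_int res;
 *
 *              __asm __volatile("lock ; cmpxchgl %0,%1"
 *              : "=a" (res), "+m" (*p) : : "memory");
 *              return (res);
 *      }
 *
 *      static __inline void
 *      atomic_store_rel_int(volatile u_int *p, u_int v)
 *      {
 *              __asm __volatile("xchgl %1,%0"
 *              : "+m" (*p), "+r" (v) : : "memory");
 *      }
 */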

#endif /* defined(I386_CPU) */

#else /* !(defined(__GNUC__) || defined(__INTEL_COMPILER)) */

extern int atomic_cmpset_int(volatile u_int *, u_int, u_int);

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)                               \
extern u_##TYPE atomic_load_acq_##TYPE(volatile u_##TYPE *p);           \
extern void atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#endif /* defined(__GNUC__) || defined(__INTEL_COMPILER) */

#endif /* KLD_MODULE */

ATOMIC_ASM(set,      char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,      char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,      short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,      short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,      int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,      int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,      long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,      long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_STORE_LOAD(char, "cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,  "cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long, "cmpxchgl %0,%1",  "xchgl %1,%0");

#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD

#define atomic_set_acq_char             atomic_set_char
#define atomic_set_rel_char             atomic_set_char
#define atomic_clear_acq_char           atomic_clear_char
#define atomic_clear_rel_char           atomic_clear_char
#define atomic_add_acq_char             atomic_add_char
#define atomic_add_rel_char             atomic_add_char
#define atomic_subtract_acq_char        atomic_subtract_char
#define atomic_subtract_rel_char        atomic_subtract_char

#define atomic_set_acq_short            atomic_set_short
#define atomic_set_rel_short            atomic_set_short
#define atomic_clear_acq_short          atomic_clear_short
#define atomic_clear_rel_short          atomic_clear_short
#define atomic_add_acq_short            atomic_add_short
#define atomic_add_rel_short            atomic_add_short
#define atomic_subtract_acq_short       atomic_subtract_short
#define atomic_subtract_rel_short       atomic_subtract_short

#define atomic_set_acq_int              atomic_set_int
#define atomic_set_rel_int              atomic_set_int
#define atomic_clear_acq_int            atomic_clear_int
#define atomic_clear_rel_int            atomic_clear_int
#define atomic_add_acq_int              atomic_add_int
#define atomic_add_rel_int              atomic_add_int
#define atomic_subtract_acq_int         atomic_subtract_int
#define atomic_subtract_rel_int         atomic_subtract_int
#define atomic_cmpset_acq_int           atomic_cmpset_int
#define atomic_cmpset_rel_int           atomic_cmpset_int

#define atomic_set_acq_long             atomic_set_long
#define atomic_set_rel_long             atomic_set_long
#define atomic_clear_acq_long           atomic_clear_long
#define atomic_clear_rel_long           atomic_clear_long
#define atomic_add_acq_long             atomic_add_long
#define atomic_add_rel_long             atomic_add_long
#define atomic_subtract_acq_long        atomic_subtract_long
#define atomic_subtract_rel_long        atomic_subtract_long
#define atomic_cmpset_long              atomic_cmpset_int
#define atomic_cmpset_acq_long          atomic_cmpset_acq_int
#define atomic_cmpset_rel_long          atomic_cmpset_rel_int

#define atomic_cmpset_acq_ptr           atomic_cmpset_ptr
#define atomic_cmpset_rel_ptr           atomic_cmpset_ptr

#define atomic_set_8                    atomic_set_char
#define atomic_set_acq_8                atomic_set_acq_char
#define atomic_set_rel_8                atomic_set_rel_char
#define atomic_clear_8                  atomic_clear_char
#define atomic_clear_acq_8              atomic_clear_acq_char
#define atomic_clear_rel_8              atomic_clear_rel_char
#define atomic_add_8                    atomic_add_char
#define atomic_add_acq_8                atomic_add_acq_char
#define atomic_add_rel_8                atomic_add_rel_char
#define atomic_subtract_8               atomic_subtract_char
#define atomic_subtract_acq_8           atomic_subtract_acq_char
#define atomic_subtract_rel_8           atomic_subtract_rel_char
#define atomic_load_acq_8               atomic_load_acq_char
#define atomic_store_rel_8              atomic_store_rel_char

#define atomic_set_16                   atomic_set_short
#define atomic_set_acq_16               atomic_set_acq_short
#define atomic_set_rel_16               atomic_set_rel_short
#define atomic_clear_16                 atomic_clear_short
#define atomic_clear_acq_16             atomic_clear_acq_short
#define atomic_clear_rel_16             atomic_clear_rel_short
#define atomic_add_16                   atomic_add_short
#define atomic_add_acq_16               atomic_add_acq_short
#define atomic_add_rel_16               atomic_add_rel_short
#define atomic_subtract_16              atomic_subtract_short
#define atomic_subtract_acq_16          atomic_subtract_acq_short
#define atomic_subtract_rel_16          atomic_subtract_rel_short
#define atomic_load_acq_16              atomic_load_acq_short
#define atomic_store_rel_16             atomic_store_rel_short

#define atomic_set_32                   atomic_set_int
#define atomic_set_acq_32               atomic_set_acq_int
#define atomic_set_rel_32               atomic_set_rel_int
#define atomic_clear_32                 atomic_clear_int
#define atomic_clear_acq_32             atomic_clear_acq_int
#define atomic_clear_rel_32             atomic_clear_rel_int
#define atomic_add_32                   atomic_add_int
#define atomic_add_acq_32               atomic_add_acq_int
#define atomic_add_rel_32               atomic_add_rel_int
#define atomic_subtract_32              atomic_subtract_int
#define atomic_subtract_acq_32          atomic_subtract_acq_int
#define atomic_subtract_rel_32          atomic_subtract_rel_int
#define atomic_load_acq_32              atomic_load_acq_int
#define atomic_store_rel_32             atomic_store_rel_int
#define atomic_cmpset_32                atomic_cmpset_int
#define atomic_cmpset_acq_32            atomic_cmpset_acq_int
#define atomic_cmpset_rel_32            atomic_cmpset_rel_int
#define atomic_readandclear_32          atomic_readandclear_int

#if !defined(WANT_FUNCTIONS)
static __inline int
atomic_cmpset_ptr(volatile void *dst, void *exp, void *src)
{

        return (atomic_cmpset_int((volatile u_int *)dst, (u_int)exp,
            (u_int)src));
}

static __inline void *
atomic_load_acq_ptr(volatile void *p)
{
        /*
         * The apparently-bogus cast to intptr_t in the following is to
         * avoid a warning from "gcc -Wbad-function-cast".
         */
        return ((void *)(intptr_t)atomic_load_acq_int((volatile u_int *)p));
}

static __inline void
atomic_store_rel_ptr(volatile void *p, void *v)
{
        atomic_store_rel_int((volatile u_int *)p, (u_int)v);
}
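
/*
 * Usage sketch (illustrative only; "listp", "oldhead" and "newhead" are
 * hypothetical names).  On i386, pointers and u_int are both 32 bits wide,
 * so a pointer can be swung atomically with cmpset:
 *
 *      static void * volatile listp;
 *      void *oldhead, *newhead;
 *
 *      if (atomic_cmpset_ptr(&listp, oldhead, newhead))
 *              ... the update took effect atomically ...
 */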

#define ATOMIC_PTR(NAME)                                \
static __inline void                                    \
atomic_##NAME##_ptr(volatile void *p, uintptr_t v)      \
{                                                       \
        atomic_##NAME##_int((volatile u_int *)p, v);    \
}                                                       \
                                                        \
static __inline void                                    \
atomic_##NAME##_acq_ptr(volatile void *p, uintptr_t v)  \
{                                                       \
        atomic_##NAME##_acq_int((volatile u_int *)p, v);\
}                                                       \
                                                        \
static __inline void                                    \
atomic_##NAME##_rel_ptr(volatile void *p, uintptr_t v)  \
{                                                       \
        atomic_##NAME##_rel_int((volatile u_int *)p, v);\
}

ATOMIC_PTR(set)
ATOMIC_PTR(clear)
ATOMIC_PTR(add)
ATOMIC_PTR(subtract)

#undef ATOMIC_PTR

#if defined(__GNUC__) || defined(__INTEL_COMPILER)

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
        u_int result;

        __asm __volatile (
        "       xorl    %0,%0 ;         "
        "       xchgl   %1,%0 ;         "
        "# atomic_readandclear_int"
        : "=&r" (result),               /* 0 (result) */
          "+m" (*addr));                /* 1 (addr) */

        return (result);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
        u_long result;

        __asm __volatile (
        "       xorl    %0,%0 ;         "
        "       xchgl   %1,%0 ;         "
        "# atomic_readandclear_long"
        : "=&r" (result),               /* 0 (result) */
          "+m" (*addr));                /* 1 (addr) */

        return (result);
}
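
/*
 * Usage sketch (illustrative only; "pending" is a hypothetical name):
 * fetch a word of pending-event bits and reset it to zero in a single
 * atomic step, so no concurrently posted bit is lost:
 *
 *      static volatile u_int pending;
 *      u_int ev;
 *
 *      ev = atomic_readandclear_int(&pending);
 *      ... handle the events recorded in ev ...
 */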

#else /* !(defined(__GNUC__) || defined(__INTEL_COMPILER)) */

extern u_long   atomic_readandclear_long(volatile u_long *);
extern u_int    atomic_readandclear_int(volatile u_int *);

#endif /* defined(__GNUC__) || defined(__INTEL_COMPILER) */

#endif /* !defined(WANT_FUNCTIONS) */
#endif /* ! _MACHINE_ATOMIC_H_ */