1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34
35 #ifndef _OS_OSATOMIC_H
36 #define _OS_OSATOMIC_H
37
38 #include <libkern/OSBase.h>
39
40 #if defined(__cplusplus)
41 extern "C" {
42 #endif
43
44 /*!
45 * @header
46 *
47 * @abstract
48 * This header declares the OSAtomic group of functions for atomic
49 * reading and updating of values.
50 */
51
52 #if defined(__i386__) || defined(__x86_64__)
53
54 /*!
55 * @function OSCompareAndSwap64
56 *
57 * @abstract
58 * 64-bit compare and swap operation.
59 *
60 * @discussion
61 * See OSCompareAndSwap.
62 */
63 extern Boolean OSCompareAndSwap64(
64 UInt64 oldValue,
65 UInt64 newValue,
66 volatile UInt64 * address);
67
68 #endif /* defined(__i386__) || defined(__x86_64__) */
69
70 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
71
72 /*!
73 * @function OSAddAtomic64
74 *
75 * @abstract
76 * 64-bit atomic add operation.
77 *
78 * @discussion
79 * See OSAddAtomic.
80 */
81 extern SInt64 OSAddAtomic64(
82 SInt64 theAmount,
83 volatile SInt64 * address);
84
85 /*!
86 * @function OSIncrementAtomic64
87 *
88 * @abstract
89 * 64-bit increment.
90 *
91 * @discussion
92 * See OSIncrementAtomic.
93 */
94 inline static SInt64 OSIncrementAtomic64(volatile SInt64 * address)
95 {
96 return OSAddAtomic64(1LL, address);
97 }
98
99 /*!
100 * @function OSDecrementAtomic64
101 *
102 * @abstract
103 * 64-bit decrement.
104 *
105 * @discussion
106 * See OSDecrementAtomic.
107 */
108 inline static SInt64 OSDecrementAtomic64(volatile SInt64 * address)
109 {
110 return OSAddAtomic64(-1LL, address);
111 }
112
113 #endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm__) */
114
115 #if XNU_KERNEL_PRIVATE
116 /* Not to be included in headerdoc.
117 *
118 * @function OSAddAtomicLong
119 *
120 * @abstract
121 * 32/64-bit atomic add operation, depending on sizeof(long).
122 *
123 * @discussion
124 * See OSAddAtomic.
125 */
126 extern long OSAddAtomicLong(
127 long theAmount,
128 volatile long * address);
129
130 /* Not to be included in headerdoc.
131 *
132 * @function OSIncrementAtomicLong
133 *
134 * @abstract
135 * 32/64-bit increment, depending on sizeof(long)
136 *
137 * @discussion
138 * See OSIncrementAtomic.
139 */
inline static long OSIncrementAtomicLong(volatile long * address)
{
	/* Atomic long-sized increment; returns the value before the add. */
	const long step = 1;
	return OSAddAtomicLong(step, address);
}
144
145 /* Not to be included in headerdoc.
146 *
147 * @function OSDecrementAtomicLong
148 *
149 * @abstract
150 * 32/64-bit decrement, depending on sizeof(long)
*
* @discussion
* See OSDecrementAtomic.
152 */
inline static long OSDecrementAtomicLong(volatile long * address)
{
	/* Atomic long-sized decrement; returns the value before the subtract. */
	const long step = -1;
	return OSAddAtomicLong(step, address);
}
157 #endif /* XNU_KERNEL_PRIVATE */
158
159 /*
160 * The macro SAFE_CAST_PTR() casts one type of pointer to another type, making sure
161 * the data the pointer is referencing is the same size. If it is not, it will cause
162 * a division by zero compiler warning. This is to work around "SInt32" being defined
163 * as "long" on ILP32 and as "int" on LP64, which would require an explicit cast to
164 * "SInt32*" when for instance passing an "int*" to OSAddAtomic() - which masks size
165 * mismatches.
166 * -- var is used twice, but sizeof does not evaluate the
167 * argument, i.e. we're safe against "++" etc. in var --
168 */
#ifdef XNU_KERNEL_PRIVATE
/*
 * Kernel-private build: verify the pointed-to sizes match before casting.
 * If sizeof(*(var)) != sizeof(*(type)0), the conditional yields 0 and the
 * (0/0) term forces a compile-time division-by-zero diagnostic; when the
 * sizes match, the added term is 0 and the whole expression is just the
 * cast.  sizeof does not evaluate its operand, so side effects in var
 * (e.g. "++") are never performed twice.
 *
 * Note: var is parenthesized inside sizeof so that expression arguments
 * (e.g. "p + 1") dereference as intended.
 */
#define SAFE_CAST_PTR(type, var) (((type)(var))+(0/(sizeof(*(var)) == sizeof(*(type)0) ? 1 : 0)))
#else
/* Non-kernel-private build: plain cast, no size check. */
#define SAFE_CAST_PTR(type, var) ((type)(var))
#endif
174
175 /*!
176 * @function OSCompareAndSwap
177 *
178 * @abstract
179 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
180 *
181 * @discussion
* The OSCompareAndSwap function compares the value at the specified address with oldValue. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwap returns true if newValue is written to the address; otherwise, it returns false.
183 *
184 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
185 *
186 * @param oldValue The value to compare at address.
187 * @param newValue The value to write to address if oldValue compares true.
188 * @param address The 4-byte aligned address of the data to update atomically.
189 * @result true if newValue was written to the address.
190 */
extern Boolean OSCompareAndSwap(
UInt32 oldValue,
UInt32 newValue,
volatile UInt32 * address);
/* Shadowing macro: size-checks the address argument via SAFE_CAST_PTR.
 * A function-like macro's own name is not re-expanded inside its
 * expansion, so the inner call resolves to the extern function above. */
#define OSCompareAndSwap(a, b, c) \
(OSCompareAndSwap(a, b, SAFE_CAST_PTR(volatile UInt32*,c)))
197
198 /*!
199 * @function OSCompareAndSwapPtr
200 *
201 * @abstract
202 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
203 *
204 * @discussion
* The OSCompareAndSwapPtr function compares the pointer-sized value at the specified address with oldValue. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwapPtr returns true if newValue is written to the address; otherwise, it returns false.
206 *
207 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
208 * @param oldValue The pointer value to compare at address.
209 * @param newValue The pointer value to write to address if oldValue compares true.
210 * @param address The pointer-size aligned address of the data to update atomically.
211 * @result true if newValue was written to the address.
212 */
extern Boolean OSCompareAndSwapPtr(
void * oldValue,
void * newValue,
void * volatile * address);
/* Shadowing macro: size-checks the address argument via SAFE_CAST_PTR
 * before calling the extern function above (macro names do not
 * re-expand inside their own expansion). */
#define OSCompareAndSwapPtr(a, b, c) \
(OSCompareAndSwapPtr(a, b, SAFE_CAST_PTR(void * volatile *,c)))
219
220 /*!
221 * @function OSAddAtomic
222 *
223 * @abstract
224 * 32-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
225 *
226 * @discussion
227 * The OSAddAtomic function adds the specified amount to the value at the specified address and returns the original value.
228 *
229 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
230 * @param amount The amount to add.
231 * @param address The 4-byte aligned address of the value to update atomically.
232 * @result The value before the addition
233 */
extern SInt32 OSAddAtomic(
SInt32 amount,
volatile SInt32 * address);
/* Shadowing macro: size-checks the address argument via SAFE_CAST_PTR
 * (guards against passing e.g. a long* where SInt32 is int), then calls
 * the extern function above. */
#define OSAddAtomic(a, b) \
(OSAddAtomic(a, SAFE_CAST_PTR(volatile SInt32*,b)))
239
240 /*!
241 * @function OSAddAtomic16
242 *
243 * @abstract
244 * 16-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
245 *
246 * @discussion
247 * The OSAddAtomic16 function adds the specified amount to the value at the specified address and returns the original value.
248 *
249 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
250 * @param amount The amount to add.
251 * @param address The 2-byte aligned address of the value to update atomically.
252 * @result The value before the addition
253 */
254 extern SInt16 OSAddAtomic16(
255 SInt32 amount,
256 volatile SInt16 * address);
257
258 /*!
259 * @function OSAddAtomic8
260 *
261 * @abstract
262 * 8-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
263 *
264 * @discussion
265 * The OSAddAtomic8 function adds the specified amount to the value at the specified address and returns the original value.
266 *
267 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
268 * @param amount The amount to add.
269 * @param address The address of the value to update atomically.
270 * @result The value before the addition.
271 */
272 extern SInt8 OSAddAtomic8(
273 SInt32 amount,
274 volatile SInt8 * address);
275
276 /*!
277 * @function OSIncrementAtomic
278 *
279 * @abstract
280 * 32-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
281 *
282 * @discussion
283 * The OSIncrementAtomic function increments the value at the specified address by one and returns the original value.
284 *
285 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
286 * @param address The 4-byte aligned address of the value to update atomically.
287 * @result The value before the increment.
288 */
extern SInt32 OSIncrementAtomic(volatile SInt32 * address);
/* Shadowing macro: size-checks the pointer argument via SAFE_CAST_PTR,
 * then calls the extern function above. */
#define OSIncrementAtomic(a) \
(OSIncrementAtomic(SAFE_CAST_PTR(volatile SInt32*,a)))
292
293 /*!
294 * @function OSIncrementAtomic16
295 *
296 * @abstract
297 * 16-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
298 *
299 * @discussion
300 * The OSIncrementAtomic16 function increments the value at the specified address by one and returns the original value.
301 *
302 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
303 * @param address The 2-byte aligned address of the value to update atomically.
304 * @result The value before the increment.
305 */
306 extern SInt16 OSIncrementAtomic16(volatile SInt16 * address);
307
308 /*!
309 * @function OSIncrementAtomic8
310 *
311 * @abstract
312 * 8-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
313 *
314 * @discussion
315 * The OSIncrementAtomic8 function increments the value at the specified address by one and returns the original value.
316 *
317 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
318 * @param address The address of the value to update atomically.
319 * @result The value before the increment.
320 */
321 extern SInt8 OSIncrementAtomic8(volatile SInt8 * address);
322
323 /*!
324 * @function OSDecrementAtomic
325 *
326 * @abstract
327 * 32-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
328 *
329 * @discussion
330 * The OSDecrementAtomic function decrements the value at the specified address by one and returns the original value.
331 *
332 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
333 * @param address The 4-byte aligned address of the value to update atomically.
334 * @result The value before the decrement.
335 */
extern SInt32 OSDecrementAtomic(volatile SInt32 * address);
/* Shadowing macro: size-checks the pointer argument via SAFE_CAST_PTR,
 * then calls the extern function above. */
#define OSDecrementAtomic(a) \
(OSDecrementAtomic(SAFE_CAST_PTR(volatile SInt32*,a)))
339
340 /*!
341 * @function OSDecrementAtomic16
342 *
343 * @abstract
344 * 16-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
345 *
346 * @discussion
347 * The OSDecrementAtomic16 function decrements the value at the specified address by one and returns the original value.
348 *
349 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
350 * @param address The 2-byte aligned address of the value to update atomically.
351 * @result The value before the decrement.
352 */
353 extern SInt16 OSDecrementAtomic16(volatile SInt16 * address);
354
355 /*!
356 * @function OSDecrementAtomic8
357 *
358 * @abstract
359 * 8-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
360 *
361 * @discussion
362 * The OSDecrementAtomic8 function decrements the value at the specified address by one and returns the original value.
363 *
364 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
365 * @param address The address of the value to update atomically.
366 * @result The value before the decrement.
367 */
368 extern SInt8 OSDecrementAtomic8(volatile SInt8 * address);
369
370 /*!
371 * @function OSBitAndAtomic
372 *
373 * @abstract
374 * 32-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
375 *
376 * @discussion
377 * The OSBitAndAtomic function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
378 *
379 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
380 * @param mask The mask to logically and with the value.
381 * @param address The 4-byte aligned address of the value to update atomically.
382 * @result The value before the bitwise operation
383 */
extern UInt32 OSBitAndAtomic(
UInt32 mask,
volatile UInt32 * address);
/* Shadowing macro: size-checks the address argument via SAFE_CAST_PTR,
 * then calls the extern function above. */
#define OSBitAndAtomic(a, b) \
(OSBitAndAtomic(a, SAFE_CAST_PTR(volatile UInt32*,b)))
389
390 /*!
391 * @function OSBitAndAtomic16
392 *
393 * @abstract
394 * 16-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
395 *
396 * @discussion
397 * The OSBitAndAtomic16 function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
398 *
399 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
400 * @param mask The mask to logically and with the value.
401 * @param address The 2-byte aligned address of the value to update atomically.
402 * @result The value before the bitwise operation.
403 */
404 extern UInt16 OSBitAndAtomic16(
405 UInt32 mask,
406 volatile UInt16 * address);
407
408 /*!
409 * @function OSBitAndAtomic8
410 *
411 * @abstract
412 * 8-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
413 *
414 * @discussion
415 * The OSBitAndAtomic8 function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
416 *
417 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
418 * @param mask The mask to logically and with the value.
419 * @param address The address of the value to update atomically.
420 * @result The value before the bitwise operation.
421 */
422 extern UInt8 OSBitAndAtomic8(
423 UInt32 mask,
424 volatile UInt8 * address);
425
426 /*!
427 * @function OSBitOrAtomic
428 *
429 * @abstract
430 * 32-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
431 *
432 * @discussion
433 * The OSBitOrAtomic function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
434 *
435 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
436 * @param mask The mask to logically or with the value.
437 * @param address The 4-byte aligned address of the value to update atomically.
438 * @result The value before the bitwise operation.
439 */
extern UInt32 OSBitOrAtomic(
UInt32 mask,
volatile UInt32 * address);
/* Shadowing macro: size-checks the address argument via SAFE_CAST_PTR,
 * then calls the extern function above. */
#define OSBitOrAtomic(a, b) \
(OSBitOrAtomic(a, SAFE_CAST_PTR(volatile UInt32*,b)))
445
446 /*!
447 * @function OSBitOrAtomic16
448 *
449 * @abstract
450 * 16-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
451 *
452 * @discussion
453 * The OSBitOrAtomic16 function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
454 *
455 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
456 * @param mask The mask to logically or with the value.
457 * @param address The 2-byte aligned address of the value to update atomically.
458 * @result The value before the bitwise operation.
459 */
460 extern UInt16 OSBitOrAtomic16(
461 UInt32 mask,
462 volatile UInt16 * address);
463
464 /*!
465 * @function OSBitOrAtomic8
466 *
467 * @abstract
468 * 8-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
469 *
470 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
471 *
472 * @discussion
473 * The OSBitOrAtomic8 function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
474 * @param mask The mask to logically or with the value.
475 * @param address The address of the value to update atomically.
476 * @result The value before the bitwise operation.
477 */
478 extern UInt8 OSBitOrAtomic8(
479 UInt32 mask,
480 volatile UInt8 * address);
481
482 /*!
483 * @function OSBitXorAtomic
484 *
485 * @abstract
486 * 32-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
487 *
488 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
489 *
490 * @discussion
491 * The OSBitXorAtomic function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
* @param mask The mask to logically xor with the value.
493 * @param address The 4-byte aligned address of the value to update atomically.
494 * @result The value before the bitwise operation.
495 */
extern UInt32 OSBitXorAtomic(
UInt32 mask,
volatile UInt32 * address);
/* Shadowing macro: size-checks the address argument via SAFE_CAST_PTR,
 * then calls the extern function above. */
#define OSBitXorAtomic(a, b) \
(OSBitXorAtomic(a, SAFE_CAST_PTR(volatile UInt32*,b)))
501
502 /*!
503 * @function OSBitXorAtomic16
504 *
505 * @abstract
506 * 16-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
507 *
508 * @discussion
509 * The OSBitXorAtomic16 function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
510 *
511 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
* @param mask The mask to logically xor with the value.
513 * @param address The 2-byte aligned address of the value to update atomically.
514 * @result The value before the bitwise operation.
515 */
516 extern UInt16 OSBitXorAtomic16(
517 UInt32 mask,
518 volatile UInt16 * address);
519
520 /*!
521 * @function OSBitXorAtomic8
522 *
523 * @abstract
524 * 8-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
525 *
526 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
527 *
528 * @discussion
529 * The OSBitXorAtomic8 function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
* @param mask The mask to logically xor with the value.
531 * @param address The address of the value to update atomically.
532 * @result The value before the bitwise operation.
533 */
534 extern UInt8 OSBitXorAtomic8(
535 UInt32 mask,
536 volatile UInt8 * address);
537
538 /*!
539 * @function OSTestAndSet
540 *
541 * @abstract
542 * Bit test and set operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
543 *
544 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
545 *
546 * @discussion
547 * The OSTestAndSet function sets a single bit in a byte at a specified address. It returns true if the bit was already set, false otherwise.
548 * @param bit The bit number in the range 0 through 7.
549 * @param startAddress The address of the byte to update atomically.
550 * @result true if the bit was already set, false otherwise.
551 */
552 extern Boolean OSTestAndSet(
553 UInt32 bit,
554 volatile UInt8 * startAddress);
555
556 /*!
557 * @function OSTestAndClear
558 *
559 * @abstract
560 * Bit test and clear operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
561 *
562 * @discussion
563 * The OSTestAndClear function clears a single bit in a byte at a specified address. It returns true if the bit was already clear, false otherwise.
564 *
565 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
566 * @param bit The bit number in the range 0 through 7.
567 * @param startAddress The address of the byte to update atomically.
568 * @result true if the bit was already clear, false otherwise.
569 */
570 extern Boolean OSTestAndClear(
571 UInt32 bit,
572 volatile UInt8 * startAddress);
573
574 #ifdef __ppc__
575 /*!
576 * @function OSEnqueueAtomic
577 *
578 * @abstract
579 * Singly linked list head insertion, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
580 *
581 * @discussion
582 * The OSEnqueueAtomic function places an element at the head of a single linked list, which is specified with the address of a head pointer, listHead. The element structure has a next field whose offset is specified.
583 *
584 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
585 * @param listHead The address of a head pointer for the list .
586 * @param element The list element to insert at the head of the list.
587 * @param elementNextFieldOffset The byte offset into the element where a pointer to the next element in the list is stored.
588 */
589 extern void OSEnqueueAtomic(
590 void * volatile * listHead,
591 void * element,
592 SInt32 elementNextFieldOffset);
593
594 /*!
595 * @function OSDequeueAtomic
596 *
597 * @abstract
598 * Singly linked list element head removal, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
599 *
600 * @discussion
601 * The OSDequeueAtomic function removes an element from the head of a single linked list, which is specified with the address of a head pointer, listHead. The element structure has a next field whose offset is specified.
602 *
603 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
604 * @param listHead The address of a head pointer for the list .
605 * @param elementNextFieldOffset The byte offset into the element where a pointer to the next element in the list is stored.
606 * @result A removed element, or zero if the list is empty.
607 */
608 extern void * OSDequeueAtomic(
609 void * volatile * listHead,
610 SInt32 elementNextFieldOffset);
611 #endif /* __ppc__ */
612
613 /*!
614 * @function OSSynchronizeIO
615 *
616 * @abstract
617 * The OSSynchronizeIO routine ensures orderly load and store operations to noncached memory mapped I/O devices.
618 *
619 * @discussion
620 * The OSSynchronizeIO routine ensures orderly load and store operations to noncached memory mapped I/O devices. It executes the eieio instruction on PowerPC processors.
621 */
static __inline__ void OSSynchronizeIO(void)
{
	/*
	 * I/O ordering barrier: issues the "eieio" instruction on PowerPC.
	 * On every other architecture this compiles to nothing.
	 * (An asm statement with no outputs is implicitly volatile; the
	 * qualifier below just makes that explicit.)
	 */
#if defined(__ppc__)
	__asm__ __volatile__ ("eieio");
#endif
}
628
629 #if defined(__cplusplus)
630 }
631 #endif
632
633 #endif /* ! _OS_OSATOMIC_H */