/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: src/sys/alpha/include/atomic.h,v 1.21.2.3 2005/10/06 18:12:05 jhb
 * $FreeBSD$
 */

#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#include <sys/atomic_common.h>
/*
 * Note: all the 64-bit atomic operations are only atomic when running
 * in 64-bit mode.  It is assumed that code compiled for n32 and n64
 * fits into this definition and that no further safeties are needed.
 *
 * It is also assumed that the add, subtract and other arithmetic is
 * done on numbers, not pointers.  Pointers following the special n32
 * rules have no atomic operations defined for them here, but they
 * generally should not need atomic operations.
 */
#ifndef __MIPS_PLATFORM_SYNC_NOPS
#define	__MIPS_PLATFORM_SYNC_NOPS ""
#endif

static __inline void
mips_sync(void)
{

	__asm __volatile (".set noreorder\n"
			"\tsync\n"
			__MIPS_PLATFORM_SYNC_NOPS
			".set reorder\n"
			: : : "memory");
}

#define	mb()	mips_sync()
#define	wmb()	mips_sync()
#define	rmb()	mips_sync()
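
/*
 * Usage sketch (illustrative only, hypothetical names): wmb() orders a
 * payload store before the flag store that publishes it, and the
 * reader's rmb() orders the flag check before the payload load.
 */
#if 0
static volatile uint32_t example_payload, example_ready;

static __inline void
example_publish(uint32_t v)
{

	example_payload = v;
	wmb();				/* payload visible before flag */
	example_ready = 1;
}

static __inline uint32_t
example_consume(void)
{

	while (example_ready == 0)
		;			/* wait for the flag */
	rmb();				/* flag read before payload read */
	return (example_payload);
}
#endif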

/*
 * Various simple arithmetic operations on memory, each atomic in the
 * presence of interrupts and SMP-safe.
 */

void atomic_set_8(__volatile uint8_t *, uint8_t);
void atomic_clear_8(__volatile uint8_t *, uint8_t);
void atomic_add_8(__volatile uint8_t *, uint8_t);
void atomic_subtract_8(__volatile uint8_t *, uint8_t);

void atomic_set_16(__volatile uint16_t *, uint16_t);
void atomic_clear_16(__volatile uint16_t *, uint16_t);
void atomic_add_16(__volatile uint16_t *, uint16_t);
void atomic_subtract_16(__volatile uint16_t *, uint16_t);

static __inline void
atomic_set_32(__volatile uint32_t *p, uint32_t v)
{
	uint32_t temp;

	__asm __volatile (
		"1:\tll %0, %3\n\t"		/* load old value */
		"or %0, %2, %0\n\t"		/* calculate new value */
		"sc %0, %1\n\t"			/* attempt to store */
		"beqz %0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline void
atomic_clear_32(__volatile uint32_t *p, uint32_t v)
{
	uint32_t temp;

	v = ~v;
	__asm __volatile (
		"1:\tll %0, %3\n\t"		/* load old value */
		"and %0, %2, %0\n\t"		/* calculate new value */
		"sc %0, %1\n\t"			/* attempt to store */
		"beqz %0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}
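
/*
 * Usage sketch (illustrative only, hypothetical names): the LL/SC
 * loops above let concurrent writers OR bits into and AND bits out of
 * a shared word without a lock, and without losing each other's
 * updates.
 */
#if 0
#define	EXAMPLE_F_DIRTY	0x01
#define	EXAMPLE_F_BUSY	0x02

static volatile uint32_t example_flags;

static __inline void
example_mark_dirty(void)
{

	atomic_set_32(&example_flags, EXAMPLE_F_DIRTY);
}

static __inline void
example_clear_busy(void)
{

	atomic_clear_32(&example_flags, EXAMPLE_F_BUSY);
}
#endif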

static __inline void
atomic_add_32(__volatile uint32_t *p, uint32_t v)
{
	uint32_t temp;

	__asm __volatile (
		"1:\tll %0, %3\n\t"		/* load old value */
		"addu %0, %2, %0\n\t"		/* calculate new value */
		"sc %0, %1\n\t"			/* attempt to store */
		"beqz %0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline void
atomic_subtract_32(__volatile uint32_t *p, uint32_t v)
{
	uint32_t temp;

	__asm __volatile (
		"1:\tll %0, %3\n\t"		/* load old value */
		"subu %0, %2\n\t"		/* calculate new value */
		"sc %0, %1\n\t"			/* attempt to store */
		"beqz %0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}
static __inline uint32_t
atomic_readandclear_32(__volatile uint32_t *addr)
{
	uint32_t result, temp;

	__asm __volatile (
		"1:\tll %0, %3\n\t"	/* load current value, asserting lock */
		"li %1, 0\n\t"		/* value to store */
		"sc %1, %2\n\t"		/* attempt to store */
		"beqz %1, 1b\n\t"	/* if the store failed, spin */
		: "=&r"(result), "=&r"(temp), "=m" (*addr)
		: "m" (*addr)
		: "memory");

	return (result);
}
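
/*
 * Sketch (illustrative only, hypothetical names): atomically fetch and
 * reset a pending-work mask, e.g. from an interrupt handler, so that
 * each pending bit is observed exactly once.
 */
#if 0
static volatile uint32_t example_pending;

static __inline void
example_drain(void (*handle)(int))
{
	uint32_t pending;
	int bit;

	pending = atomic_readandclear_32(&example_pending);
	for (bit = 0; bit < 32; bit++)
		if (pending & (1u << bit))
			handle(bit);
}
#endif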

static __inline uint32_t
atomic_readandset_32(__volatile uint32_t *addr, uint32_t value)
{
	uint32_t result, temp;

	__asm __volatile (
		"1:\tll %0, %3\n\t"	/* load current value, asserting lock */
		"or %1, $0, %4\n\t"	/* value to store */
		"sc %1, %2\n\t"		/* attempt to store */
		"beqz %1, 1b\n\t"	/* if the store failed, spin */
		: "=&r"(result), "=&r"(temp), "=m" (*addr)
		: "m" (*addr), "r" (value)
		: "memory");

	return (result);
}

#if defined(__mips_n64) || defined(__mips_n32)
static __inline void
atomic_set_64(__volatile uint64_t *p, uint64_t v)
{
	uint64_t temp;

	__asm __volatile (
		"1:\n\t"
		"lld %0, %3\n\t"		/* load old value */
		"or %0, %2, %0\n\t"		/* calculate new value */
		"scd %0, %1\n\t"		/* attempt to store */
		"beqz %0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline void
atomic_clear_64(__volatile uint64_t *p, uint64_t v)
{
	uint64_t temp;

	v = ~v;
	__asm __volatile (
		"1:\n\t"
		"lld %0, %3\n\t"		/* load old value */
		"and %0, %2, %0\n\t"		/* calculate new value */
		"scd %0, %1\n\t"		/* attempt to store */
		"beqz %0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline void
atomic_add_64(__volatile uint64_t *p, uint64_t v)
{
	uint64_t temp;

	__asm __volatile (
		"1:\n\t"
		"lld %0, %3\n\t"		/* load old value */
		"daddu %0, %2, %0\n\t"		/* calculate new value */
		"scd %0, %1\n\t"		/* attempt to store */
		"beqz %0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline void
atomic_subtract_64(__volatile uint64_t *p, uint64_t v)
{
	uint64_t temp;

	__asm __volatile (
		"1:\n\t"
		"lld %0, %3\n\t"		/* load old value */
		"dsubu %0, %2\n\t"		/* calculate new value */
		"scd %0, %1\n\t"		/* attempt to store */
		"beqz %0, 1b\n\t"		/* spin if failed */
		: "=&r" (temp), "=m" (*p)
		: "r" (v), "m" (*p)
		: "memory");
}

static __inline uint64_t
atomic_readandclear_64(__volatile uint64_t *addr)
{
	uint64_t result, temp;

	__asm __volatile (
		"1:\n\t"
		"lld %0, %3\n\t"		/* load old value */
		"li %1, 0\n\t"			/* value to store */
		"scd %1, %2\n\t"		/* attempt to store */
		"beqz %1, 1b\n\t"		/* if the store failed, spin */
		: "=&r"(result), "=&r"(temp), "=m" (*addr)
		: "m" (*addr)
		: "memory");

	return (result);
}

static __inline uint64_t
atomic_readandset_64(__volatile uint64_t *addr, uint64_t value)
{
	uint64_t result, temp;

	__asm __volatile (
		"1:\n\t"
		"lld %0, %3\n\t"		/* load old value */
		"or %1, $0, %4\n\t"		/* value to store */
		"scd %1, %2\n\t"		/* attempt to store */
		"beqz %1, 1b\n\t"		/* if the store failed, spin */
		: "=&r"(result), "=&r"(temp), "=m" (*addr)
		: "m" (*addr), "r" (value)
		: "memory");

	return (result);
}
#endif

#define	ATOMIC_ACQ_REL(NAME, WIDTH)					\
static __inline void							\
atomic_##NAME##_acq_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{									\
	atomic_##NAME##_##WIDTH(p, v);					\
	mips_sync();							\
}									\
									\
static __inline void							\
atomic_##NAME##_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{									\
	mips_sync();							\
	atomic_##NAME##_##WIDTH(p, v);					\
}
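
/*
 * For example, ATOMIC_ACQ_REL(add, 32) expands to (roughly):
 *
 *	static __inline void
 *	atomic_add_acq_32(__volatile uint32_t *p, uint32_t v)
 *	{
 *		atomic_add_32(p, v);
 *		mips_sync();
 *	}
 *
 * plus the matching atomic_add_rel_32(), where the mips_sync() comes
 * before the operation instead, yielding acquire and release
 * semantics respectively.
 */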

/* Variants of simple arithmetic with memory barriers. */
ATOMIC_ACQ_REL(set, 8)
ATOMIC_ACQ_REL(clear, 8)
ATOMIC_ACQ_REL(add, 8)
ATOMIC_ACQ_REL(subtract, 8)
ATOMIC_ACQ_REL(set, 16)
ATOMIC_ACQ_REL(clear, 16)
ATOMIC_ACQ_REL(add, 16)
ATOMIC_ACQ_REL(subtract, 16)
ATOMIC_ACQ_REL(set, 32)
ATOMIC_ACQ_REL(clear, 32)
ATOMIC_ACQ_REL(add, 32)
ATOMIC_ACQ_REL(subtract, 32)
#if defined(__mips_n64) || defined(__mips_n32)
ATOMIC_ACQ_REL(set, 64)
ATOMIC_ACQ_REL(clear, 64)
ATOMIC_ACQ_REL(add, 64)
ATOMIC_ACQ_REL(subtract, 64)
#endif

#undef ATOMIC_ACQ_REL

/*
 * We assume that a = b will do atomic loads and stores.
 */
#define	ATOMIC_STORE_LOAD(WIDTH)					\
static __inline uint##WIDTH##_t					\
atomic_load_acq_##WIDTH(__volatile uint##WIDTH##_t *p)			\
{									\
	uint##WIDTH##_t v;						\
									\
	v = *p;								\
	mips_sync();							\
	return (v);							\
}									\
									\
static __inline void							\
atomic_store_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{									\
	mips_sync();							\
	*p = v;								\
}

ATOMIC_STORE_LOAD(32)
ATOMIC_STORE_LOAD(64)
#undef ATOMIC_STORE_LOAD
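
/*
 * Sketch (illustrative only, hypothetical names): the classic
 * message-passing pattern built on these primitives.  The release
 * store keeps the payload write before the flag write; the acquire
 * load keeps the flag check before the payload read.
 */
#if 0
static uint32_t example_msg;
static volatile uint32_t example_msg_ready;

static __inline void
example_send(uint32_t m)
{

	example_msg = m;
	atomic_store_rel_32(&example_msg_ready, 1);
}

static __inline uint32_t
example_recv(void)
{

	while (atomic_load_acq_32(&example_msg_ready) == 0)
		;			/* spin until published */
	return (example_msg);
}
#endif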

/*
 * Atomically compare the value stored at *p with cmpval and if the
 * two values are equal, update the value of *p with newval. Returns
 * zero if the compare failed, nonzero otherwise.
 */
static __inline uint32_t
atomic_cmpset_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
	uint32_t ret;

	__asm __volatile (
		"1:\tll %0, %4\n\t"		/* load old value */
		"bne %0, %2, 2f\n\t"		/* compare */
		"move %0, %3\n\t"		/* value to store */
		"sc %0, %1\n\t"			/* attempt to store */
		"beqz %0, 1b\n\t"		/* if it failed, spin */
		"j 3f\n\t"
		"2:\n\t"
		"li %0, 0\n\t"
		"3:\n"
		: "=&r" (ret), "=m" (*p)
		: "r" (cmpval), "r" (newval), "m" (*p)
		: "memory");

	return (ret);
}

/*
 * Acquire and release variants of atomic_cmpset_32(): the acquire
 * form issues the memory barrier after the compare-and-set, the
 * release form before it.
 */
static __inline uint32_t
atomic_cmpset_acq_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
	int retval;

	retval = atomic_cmpset_32(p, cmpval, newval);
	mips_sync();
	return (retval);
}

static __inline uint32_t
atomic_cmpset_rel_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{

	mips_sync();
	return (atomic_cmpset_32(p, cmpval, newval));
}
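
/*
 * Sketch (illustrative only, hypothetical names): a minimal
 * test-and-set spinlock built on the compare-and-set primitives
 * above.  The acquire variant keeps the critical section after the
 * lock is taken; the release store keeps it before the unlock.
 */
#if 0
static volatile uint32_t example_lock;	/* 0 = free, 1 = held */

static __inline void
example_lock_acquire(void)
{

	while (atomic_cmpset_acq_32(&example_lock, 0, 1) == 0)
		;			/* spin until 0 -> 1 succeeds */
}

static __inline void
example_lock_release(void)
{

	atomic_store_rel_32(&example_lock, 0);
}
#endif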

static __inline uint32_t
atomic_fcmpset_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
{
	uint32_t ret;

	__asm __volatile (
		"1:\n\t"
		"ll %0, %1\n\t"			/* load old value */
		"bne %0, %4, 2f\n\t"		/* compare */
		"move %0, %3\n\t"		/* value to store */
		"sc %0, %1\n\t"			/* attempt to store */
		"beqz %0, 1b\n\t"		/* if it failed, spin */
		"j 3f\n\t"
		"2:\n\t"
		"sw %0, %2\n\t"			/* save old value */
		"li %0, 0\n\t"
		"3:\n"
		: "=&r" (ret), "+m" (*p), "=m" (*cmpval)
		: "r" (newval), "r" (*cmpval)
		: "memory");
	return (ret);
}
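
/*
 * Sketch (illustrative only, hypothetical name): a lock-free
 * read-modify-write loop.  Unlike atomic_cmpset_32(), a failed
 * atomic_fcmpset_32() writes the value it found into *cmpval, so the
 * retry loop does not have to re-read *p itself.
 */
#if 0
static __inline void
example_saturating_inc(__volatile uint32_t *p)
{
	uint32_t old;

	old = *p;
	do {
		if (old == 0xffffffffu)
			return;		/* already saturated */
	} while (atomic_fcmpset_32(p, &old, old + 1) == 0);
}
#endif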

static __inline uint32_t
atomic_fcmpset_acq_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
{
	int retval;

	retval = atomic_fcmpset_32(p, cmpval, newval);
	mips_sync();
	return (retval);
}

static __inline uint32_t
atomic_fcmpset_rel_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
{

	mips_sync();
	return (atomic_fcmpset_32(p, cmpval, newval));
}

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline uint32_t
atomic_fetchadd_32(__volatile uint32_t *p, uint32_t v)
{
	uint32_t value, temp;

	__asm __volatile (
		"1:\tll %0, %1\n\t"		/* load old value */
		"addu %2, %3, %0\n\t"		/* calculate new value */
		"sc %2, %1\n\t"			/* attempt to store */
		"beqz %2, 1b\n\t"		/* spin if failed */
		: "=&r" (value), "=m" (*p), "=&r" (temp)
		: "r" (v), "m" (*p));
	return (value);
}
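
/*
 * Sketch (illustrative only, hypothetical names): atomic_fetchadd_32()
 * returns the value before the add, which makes it a natural
 * unique-ticket allocator.
 */
#if 0
static volatile uint32_t example_next_id;

static __inline uint32_t
example_alloc_id(void)
{

	return (atomic_fetchadd_32(&example_next_id, 1));
}
#endif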

#if defined(__mips_n64) || defined(__mips_n32)
/*
 * Atomically compare the value stored at *p with cmpval and if the
 * two values are equal, update the value of *p with newval. Returns
 * zero if the compare failed, nonzero otherwise.
 */
static __inline uint64_t
atomic_cmpset_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
	uint64_t ret;

	__asm __volatile (
		"1:\n\t"
		"lld %0, %4\n\t"		/* load old value */
		"bne %0, %2, 2f\n\t"		/* compare */
		"move %0, %3\n\t"		/* value to store */
		"scd %0, %1\n\t"		/* attempt to store */
		"beqz %0, 1b\n\t"		/* if it failed, spin */
		"j 3f\n\t"
		"2:\n\t"
		"li %0, 0\n\t"
		"3:\n"
		: "=&r" (ret), "=m" (*p)
		: "r" (cmpval), "r" (newval), "m" (*p)
		: "memory");

	return (ret);
}

/*
 * Acquire and release variants of atomic_cmpset_64(), with the memory
 * barrier after and before the compare-and-set respectively.
 */
static __inline uint64_t
atomic_cmpset_acq_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
	int retval;

	retval = atomic_cmpset_64(p, cmpval, newval);
	mips_sync();
	return (retval);
}

static __inline uint64_t
atomic_cmpset_rel_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{

	mips_sync();
	return (atomic_cmpset_64(p, cmpval, newval));
}

static __inline uint32_t
atomic_fcmpset_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
{
	uint32_t ret;

	__asm __volatile (
		"1:\n\t"
		"lld %0, %1\n\t"		/* load old value */
		"bne %0, %4, 2f\n\t"		/* compare */
		"move %0, %3\n\t"		/* value to store */
		"scd %0, %1\n\t"		/* attempt to store */
		"beqz %0, 1b\n\t"		/* if it failed, spin */
		"j 3f\n\t"
		"2:\n\t"
		"sd %0, %2\n\t"			/* save old value */
		"li %0, 0\n\t"
		"3:\n"
		: "=&r" (ret), "+m" (*p), "=m" (*cmpval)
		: "r" (newval), "r" (*cmpval)
		: "memory");

	return (ret);
}

static __inline uint64_t
atomic_fcmpset_acq_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
{
	int retval;

	retval = atomic_fcmpset_64(p, cmpval, newval);
	mips_sync();
	return (retval);
}

static __inline uint64_t
atomic_fcmpset_rel_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
{

	mips_sync();
	return (atomic_fcmpset_64(p, cmpval, newval));
}

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline uint64_t
atomic_fetchadd_64(__volatile uint64_t *p, uint64_t v)
{
	uint64_t value, temp;

	__asm __volatile (
		"1:\n\t"
		"lld %0, %1\n\t"		/* load old value */
		"daddu %2, %3, %0\n\t"		/* calculate new value */
		"scd %2, %1\n\t"		/* attempt to store */
		"beqz %2, 1b\n\t"		/* spin if failed */
		: "=&r" (value), "=m" (*p), "=&r" (temp)
		: "r" (v), "m" (*p));
	return (value);
}
#endif

static __inline void
atomic_thread_fence_acq(void)
{

	mips_sync();
}

static __inline void
atomic_thread_fence_rel(void)
{

	mips_sync();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	mips_sync();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	mips_sync();
}
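
/*
 * Since the MIPS sync used above is a full barrier, all four fence
 * strengths are implemented identically.  For example, the pair
 *
 *	atomic_thread_fence_rel();
 *	*p = v;
 *
 * orders exactly like atomic_store_rel_32(p, v) (illustrative only).
 */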

/* Operations on chars. */
#define	atomic_set_char		atomic_set_8
#define	atomic_set_acq_char	atomic_set_acq_8
#define	atomic_set_rel_char	atomic_set_rel_8
#define	atomic_clear_char	atomic_clear_8
#define	atomic_clear_acq_char	atomic_clear_acq_8
#define	atomic_clear_rel_char	atomic_clear_rel_8
#define	atomic_add_char		atomic_add_8
#define	atomic_add_acq_char	atomic_add_acq_8
#define	atomic_add_rel_char	atomic_add_rel_8
#define	atomic_subtract_char	atomic_subtract_8
#define	atomic_subtract_acq_char	atomic_subtract_acq_8
#define	atomic_subtract_rel_char	atomic_subtract_rel_8

/* Operations on shorts. */
#define	atomic_set_short	atomic_set_16
#define	atomic_set_acq_short	atomic_set_acq_16
#define	atomic_set_rel_short	atomic_set_rel_16
#define	atomic_clear_short	atomic_clear_16
#define	atomic_clear_acq_short	atomic_clear_acq_16
#define	atomic_clear_rel_short	atomic_clear_rel_16
#define	atomic_add_short	atomic_add_16
#define	atomic_add_acq_short	atomic_add_acq_16
#define	atomic_add_rel_short	atomic_add_rel_16
#define	atomic_subtract_short	atomic_subtract_16
#define	atomic_subtract_acq_short	atomic_subtract_acq_16
#define	atomic_subtract_rel_short	atomic_subtract_rel_16

/* Operations on ints. */
#define	atomic_set_int		atomic_set_32
#define	atomic_set_acq_int	atomic_set_acq_32
#define	atomic_set_rel_int	atomic_set_rel_32
#define	atomic_clear_int	atomic_clear_32
#define	atomic_clear_acq_int	atomic_clear_acq_32
#define	atomic_clear_rel_int	atomic_clear_rel_32
#define	atomic_add_int		atomic_add_32
#define	atomic_add_acq_int	atomic_add_acq_32
#define	atomic_add_rel_int	atomic_add_rel_32
#define	atomic_subtract_int	atomic_subtract_32
#define	atomic_subtract_acq_int	atomic_subtract_acq_32
#define	atomic_subtract_rel_int	atomic_subtract_rel_32
#define	atomic_cmpset_int	atomic_cmpset_32
#define	atomic_cmpset_acq_int	atomic_cmpset_acq_32
#define	atomic_cmpset_rel_int	atomic_cmpset_rel_32
#define	atomic_fcmpset_int	atomic_fcmpset_32
#define	atomic_fcmpset_acq_int	atomic_fcmpset_acq_32
#define	atomic_fcmpset_rel_int	atomic_fcmpset_rel_32
#define	atomic_load_acq_int	atomic_load_acq_32
#define	atomic_store_rel_int	atomic_store_rel_32
#define	atomic_readandclear_int	atomic_readandclear_32
#define	atomic_readandset_int	atomic_readandset_32
#define	atomic_fetchadd_int	atomic_fetchadd_32

/*
 * The following is right even for n32: under n32 pointers are still
 * 32 bits, so we operate on them as 32-bit quantities, even though
 * they are sign-extended in registers.  For longs there is no
 * question, since outside of n64 they are always 32 bits.
 */
#ifdef __mips_n64
/* Operations on longs. */
#define	atomic_set_long		atomic_set_64
#define	atomic_set_acq_long	atomic_set_acq_64
#define	atomic_set_rel_long	atomic_set_rel_64
#define	atomic_clear_long	atomic_clear_64
#define	atomic_clear_acq_long	atomic_clear_acq_64
#define	atomic_clear_rel_long	atomic_clear_rel_64
#define	atomic_add_long		atomic_add_64
#define	atomic_add_acq_long	atomic_add_acq_64
#define	atomic_add_rel_long	atomic_add_rel_64
#define	atomic_subtract_long	atomic_subtract_64
#define	atomic_subtract_acq_long	atomic_subtract_acq_64
#define	atomic_subtract_rel_long	atomic_subtract_rel_64
#define	atomic_cmpset_long	atomic_cmpset_64
#define	atomic_cmpset_acq_long	atomic_cmpset_acq_64
#define	atomic_cmpset_rel_long	atomic_cmpset_rel_64
#define	atomic_fcmpset_long	atomic_fcmpset_64
#define	atomic_fcmpset_acq_long	atomic_fcmpset_acq_64
#define	atomic_fcmpset_rel_long	atomic_fcmpset_rel_64
#define	atomic_load_acq_long	atomic_load_acq_64
#define	atomic_store_rel_long	atomic_store_rel_64
#define	atomic_fetchadd_long	atomic_fetchadd_64
#define	atomic_readandclear_long	atomic_readandclear_64

#else /* !__mips_n64 */

/* Operations on longs. */
#define	atomic_set_long(p, v)						\
	atomic_set_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_acq_long(p, v)					\
	atomic_set_acq_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_set_rel_long(p, v)					\
	atomic_set_rel_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_long(p, v)						\
	atomic_clear_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_acq_long(p, v)					\
	atomic_clear_acq_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_clear_rel_long(p, v)					\
	atomic_clear_rel_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_long(p, v)						\
	atomic_add_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_acq_long(p, v)					\
	atomic_add_acq_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_add_rel_long(p, v)					\
	atomic_add_rel_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_long(p, v)					\
	atomic_subtract_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_acq_long(p, v)					\
	atomic_subtract_acq_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_subtract_rel_long(p, v)					\
	atomic_subtract_rel_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_cmpset_long(p, cmpval, newval)				\
	atomic_cmpset_32((volatile u_int *)(p), (u_int)(cmpval),	\
	    (u_int)(newval))
#define	atomic_cmpset_acq_long(p, cmpval, newval)			\
	atomic_cmpset_acq_32((volatile u_int *)(p), (u_int)(cmpval),	\
	    (u_int)(newval))
#define	atomic_cmpset_rel_long(p, cmpval, newval)			\
	atomic_cmpset_rel_32((volatile u_int *)(p), (u_int)(cmpval),	\
	    (u_int)(newval))
#define	atomic_fcmpset_long(p, cmpval, newval)				\
	atomic_fcmpset_32((volatile u_int *)(p), (u_int *)(cmpval),	\
	    (u_int)(newval))
#define	atomic_fcmpset_acq_long(p, cmpval, newval)			\
	atomic_fcmpset_acq_32((volatile u_int *)(p), (u_int *)(cmpval),	\
	    (u_int)(newval))
#define	atomic_fcmpset_rel_long(p, cmpval, newval)			\
	atomic_fcmpset_rel_32((volatile u_int *)(p), (u_int *)(cmpval),	\
	    (u_int)(newval))
#define	atomic_load_acq_long(p)						\
	(u_long)atomic_load_acq_32((volatile u_int *)(p))
#define	atomic_store_rel_long(p, v)					\
	atomic_store_rel_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_fetchadd_long(p, v)					\
	atomic_fetchadd_32((volatile u_int *)(p), (u_int)(v))
#define	atomic_readandclear_long(p)					\
	atomic_readandclear_32((volatile u_int *)(p))

#endif /* __mips_n64 */

/* Operations on pointers. */
#define	atomic_set_ptr		atomic_set_long
#define	atomic_set_acq_ptr	atomic_set_acq_long
#define	atomic_set_rel_ptr	atomic_set_rel_long
#define	atomic_clear_ptr	atomic_clear_long
#define	atomic_clear_acq_ptr	atomic_clear_acq_long
#define	atomic_clear_rel_ptr	atomic_clear_rel_long
#define	atomic_add_ptr		atomic_add_long
#define	atomic_add_acq_ptr	atomic_add_acq_long
#define	atomic_add_rel_ptr	atomic_add_rel_long
#define	atomic_subtract_ptr	atomic_subtract_long
#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define	atomic_cmpset_ptr	atomic_cmpset_long
#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define	atomic_fcmpset_ptr	atomic_fcmpset_long
#define	atomic_fcmpset_acq_ptr	atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_ptr	atomic_fcmpset_rel_long
#define	atomic_load_acq_ptr	atomic_load_acq_long
#define	atomic_store_rel_ptr	atomic_store_rel_long
#define	atomic_readandclear_ptr	atomic_readandclear_long
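
/*
 * Sketch (illustrative only, hypothetical types and names): a
 * lock-free singly-linked stack push using atomic_cmpset_rel_ptr();
 * the release semantics publish the node's contents before the node
 * becomes reachable from the list head.
 */
#if 0
struct example_node {
	struct example_node	*next;
};

static struct example_node *example_head;

static __inline void
example_push(struct example_node *n)
{
	uintptr_t old;

	do {
		old = (uintptr_t)example_head;
		n->next = (struct example_node *)old;
	} while (atomic_cmpset_rel_ptr(
	    (__volatile uintptr_t *)&example_head, old,
	    (uintptr_t)n) == 0);
}
#endif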

#endif /* ! _MACHINE_ATOMIC_H_ */