/* $NetBSD: atomic.h,v 1.1 2002/10/19 12:22:34 bsh Exp $ */

/*-
 * Copyright (C) 2003-2004 Olivier Houchard
 * Copyright (C) 1994-1997 Mark Brinicombe
 * Copyright (C) 1994 Brini
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of Brini may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: releng/12.0/sys/arm/include/atomic-v4.h 337432 2018-08-07 18:56:01Z marius $
 */

#ifndef _MACHINE_ATOMIC_V4_H_
#define _MACHINE_ATOMIC_V4_H_

#ifndef _MACHINE_ATOMIC_H_
#error Do not include this file directly, use <machine/atomic.h>
#endif

#if __ARM_ARCH <= 5
#define isb()	__asm __volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory")
#define dsb()	__asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory")
#define dmb()	dsb()
#else
#error Only use this file with ARMv5 and earlier
#endif

#define mb()	dmb()
#define wmb()	dmb()
#define rmb()	dmb()
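
/*
 * ARMv5 has no dedicated barrier instructions; the CP15 c7 operations
 * above (prefetch-buffer flush for isb(), write-buffer drain for dsb())
 * are the closest equivalents, so every higher-level barrier in this
 * file collapses onto them.
 */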

#define __with_interrupts_disabled(expr) \
	do { \
		u_int cpsr_save, tmp; \
 \
		__asm __volatile( \
			"mrs	%0, cpsr;" \
			"orr	%1, %0, %2;" \
			"msr	cpsr_fsxc, %1;" \
		: "=r" (cpsr_save), "=r" (tmp) \
		: "I" (PSR_I | PSR_F) \
		: "cc" ); \
		(expr); \
		__asm __volatile( \
			"msr	cpsr_fsxc, %0" \
		: /* no output */ \
		: "r" (cpsr_save) \
		: "cc" ); \
	} while (0)
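
/*
 * A minimal usage sketch (illustrative only; "counter" is a hypothetical
 * variable, not part of this file):
 *
 *	static volatile u_int counter;
 *	...
 *	__with_interrupts_disabled(counter += 1);
 *
 * Masking IRQ and FIQ is sufficient for atomicity here because ARMv5
 * systems are uniprocessor: nothing else can run between the load and
 * the store while both interrupt sources are disabled.
 */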

static __inline uint32_t
__swp(uint32_t val, volatile uint32_t *ptr)
{
	__asm __volatile("swp	%0, %2, [%3]"
	    : "=&r" (val), "=m" (*ptr)
	    : "r" (val), "r" (ptr), "m" (*ptr)
	    : "memory");
	return (val);
}
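
/*
 * SWP atomically exchanges a register with a word of memory in a single
 * locked bus transaction, so it works in any mode without touching the
 * interrupt mask.  (The instruction is deprecated starting with ARMv6,
 * which is why this file is restricted to ARMv5 and earlier.)
 */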

#ifdef _KERNEL
#define ARM_HAVE_ATOMIC64

static __inline void
atomic_add_32(volatile u_int32_t *p, u_int32_t val)
{
	__with_interrupts_disabled(*p += val);
}

static __inline void
atomic_add_64(volatile u_int64_t *p, u_int64_t val)
{
	__with_interrupts_disabled(*p += val);
}

static __inline void
atomic_clear_32(volatile uint32_t *address, uint32_t clearmask)
{
	__with_interrupts_disabled(*address &= ~clearmask);
}

static __inline void
atomic_clear_64(volatile uint64_t *address, uint64_t clearmask)
{
	__with_interrupts_disabled(*address &= ~clearmask);
}

static __inline int
atomic_fcmpset_32(volatile u_int32_t *p, volatile u_int32_t *cmpval, volatile u_int32_t newval)
{
	int ret;

	__with_interrupts_disabled(
	 {
		if (*p == *cmpval) {
			*p = newval;
			ret = 1;
		} else {
			*cmpval = *p;
			ret = 0;
		}
	 });
	return (ret);
}

static __inline int
atomic_fcmpset_64(volatile u_int64_t *p, volatile u_int64_t *cmpval, volatile u_int64_t newval)
{
	int ret;

	__with_interrupts_disabled(
	 {
		if (*p == *cmpval) {
			*p = newval;
			ret = 1;
		} else {
			*cmpval = *p;
			ret = 0;
		}
	 });
	return (ret);
}

static __inline int
atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
{
	int ret;

	__with_interrupts_disabled(
	 {
		if (*p == cmpval) {
			*p = newval;
			ret = 1;
		} else {
			ret = 0;
		}
	 });
	return (ret);
}

static __inline int
atomic_cmpset_64(volatile u_int64_t *p, volatile u_int64_t cmpval, volatile u_int64_t newval)
{
	int ret;

	__with_interrupts_disabled(
	 {
		if (*p == cmpval) {
			*p = newval;
			ret = 1;
		} else {
			ret = 0;
		}
	 });
	return (ret);
}
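
/*
 * Typical consumer pattern (illustrative sketch only, not part of this
 * file): retry a compare-and-set until it succeeds, recomputing the new
 * value from the freshly observed old value after each failure.
 *
 *	static __inline uint32_t
 *	example_saturating_inc(volatile uint32_t *p)	// hypothetical
 *	{
 *		uint32_t old;
 *
 *		do {
 *			old = *p;
 *			if (old == UINT32_MAX)
 *				return (old);		// saturate
 *		} while (atomic_cmpset_32(p, old, old + 1) == 0);
 *		return (old + 1);
 *	}
 */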

static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
{
	uint32_t value;

	__with_interrupts_disabled(
	 {
		value = *p;
		*p += v;
	 });
	return (value);
}

static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t v)
{
	uint64_t value;

	__with_interrupts_disabled(
	 {
		value = *p;
		*p += v;
	 });
	return (value);
}

static __inline uint64_t
atomic_load_64(volatile uint64_t *p)
{
	uint64_t value;

	__with_interrupts_disabled(value = *p);
	return (value);
}

static __inline void
atomic_set_32(volatile uint32_t *address, uint32_t setmask)
{
	__with_interrupts_disabled(*address |= setmask);
}

static __inline void
atomic_set_64(volatile uint64_t *address, uint64_t setmask)
{
	__with_interrupts_disabled(*address |= setmask);
}

static __inline void
atomic_store_64(volatile uint64_t *p, uint64_t value)
{
	__with_interrupts_disabled(*p = value);
}

static __inline void
atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
{
	__with_interrupts_disabled(*p -= val);
}

static __inline void
atomic_subtract_64(volatile u_int64_t *p, u_int64_t val)
{
	__with_interrupts_disabled(*p -= val);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{
	uint64_t value;

	__with_interrupts_disabled(
	 {
		value = *p;
		*p = v;
	 });
	return (value);
}

#else /* !_KERNEL */
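
/*
 * Userland cannot disable interrupts, so the operations below rely on
 * restartable atomic sequences (RAS) instead: each sequence publishes
 * its start and end addresses at ARM_RAS_START and ARM_RAS_START + 4
 * before doing a plain load-modify-store, then clears the window
 * (writing 0 and 0xffffffff) when done.  If the kernel's RAS support
 * preempts a thread whose program counter lies inside the published
 * window, the thread is rewound to the start label before it resumes,
 * so the sequence appears to execute atomically on this uniprocessor
 * class of CPU.
 */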

static __inline void
atomic_add_32(volatile u_int32_t *p, u_int32_t val)
{
	int start, ras_start = ARM_RAS_START;

	__asm __volatile("1:\n"
	    "adr	%1, 1b\n"
	    "str	%1, [%0]\n"
	    "adr	%1, 2f\n"
	    "str	%1, [%0, #4]\n"
	    "ldr	%1, [%2]\n"
	    "add	%1, %1, %3\n"
	    "str	%1, [%2]\n"
	    "2:\n"
	    "mov	%1, #0\n"
	    "str	%1, [%0]\n"
	    "mov	%1, #0xffffffff\n"
	    "str	%1, [%0, #4]\n"
	    : "+r" (ras_start), "=r" (start), "+r" (p), "+r" (val)
	    : : "memory");
}

static __inline void
atomic_clear_32(volatile uint32_t *address, uint32_t clearmask)
{
	int start, ras_start = ARM_RAS_START;

	__asm __volatile("1:\n"
	    "adr	%1, 1b\n"
	    "str	%1, [%0]\n"
	    "adr	%1, 2f\n"
	    "str	%1, [%0, #4]\n"
	    "ldr	%1, [%2]\n"
	    "bic	%1, %1, %3\n"
	    "str	%1, [%2]\n"
	    "2:\n"
	    "mov	%1, #0\n"
	    "str	%1, [%0]\n"
	    "mov	%1, #0xffffffff\n"
	    "str	%1, [%0, #4]\n"
	    : "+r" (ras_start), "=r" (start), "+r" (address), "+r" (clearmask)
	    : : "memory");
}

static __inline int
atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
{
	int done, ras_start = ARM_RAS_START;

	__asm __volatile("1:\n"
	    "adr	%1, 1b\n"
	    "str	%1, [%0]\n"
	    "adr	%1, 2f\n"
	    "str	%1, [%0, #4]\n"
	    "ldr	%1, [%2]\n"
	    "cmp	%1, %3\n"
	    "streq	%4, [%2]\n"
	    "2:\n"
	    "mov	%1, #0\n"
	    "str	%1, [%0]\n"
	    "mov	%1, #0xffffffff\n"
	    "str	%1, [%0, #4]\n"
	    "moveq	%1, #1\n"
	    "movne	%1, #0\n"
	    : "+r" (ras_start), "=r" (done), "+r" (p), "+r" (cmpval),
	      "+r" (newval)
	    : : "cc", "memory");
	return (done);
}

static __inline int
atomic_fcmpset_32(volatile u_int32_t *p, volatile u_int32_t *cmpval, volatile u_int32_t newval)
{
	int done, oldval, ras_start = ARM_RAS_START;

	__asm __volatile("1:\n"
	    "adr	%1, 1b\n"
	    "str	%1, [%0]\n"
	    "adr	%1, 2f\n"
	    "str	%1, [%0, #4]\n"
	    "ldr	%1, [%2]\n"
	    "ldr	%5, [%3]\n"
	    "cmp	%1, %5\n"
	    "streq	%4, [%2]\n"
	    "2:\n"
	    "mov	%5, #0\n"
	    "str	%5, [%0]\n"
	    "mov	%5, #0xffffffff\n"
	    "str	%5, [%0, #4]\n"
	    "strne	%1, [%3]\n"
	    "moveq	%1, #1\n"
	    "movne	%1, #0\n"
	    : "+r" (ras_start), "=r" (done), "+r" (p), "+r" (cmpval),
	      "+r" (newval), "+r" (oldval)
	    : : "cc", "memory");
	return (done);
}

static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
{
	uint32_t start, tmp, ras_start = ARM_RAS_START;

	__asm __volatile("1:\n"
	    "adr	%1, 1b\n"
	    "str	%1, [%0]\n"
	    "adr	%1, 2f\n"
	    "str	%1, [%0, #4]\n"
	    "ldr	%1, [%3]\n"
	    "mov	%2, %1\n"
	    "add	%2, %2, %4\n"
	    "str	%2, [%3]\n"
	    "2:\n"
	    "mov	%2, #0\n"
	    "str	%2, [%0]\n"
	    "mov	%2, #0xffffffff\n"
	    "str	%2, [%0, #4]\n"
	    : "+r" (ras_start), "=r" (start), "=r" (tmp), "+r" (p), "+r" (v)
	    : : "memory");
	return (start);
}

static __inline void
atomic_set_32(volatile uint32_t *address, uint32_t setmask)
{
	int start, ras_start = ARM_RAS_START;

	__asm __volatile("1:\n"
	    "adr	%1, 1b\n"
	    "str	%1, [%0]\n"
	    "adr	%1, 2f\n"
	    "str	%1, [%0, #4]\n"
	    "ldr	%1, [%2]\n"
	    "orr	%1, %1, %3\n"
	    "str	%1, [%2]\n"
	    "2:\n"
	    "mov	%1, #0\n"
	    "str	%1, [%0]\n"
	    "mov	%1, #0xffffffff\n"
	    "str	%1, [%0, #4]\n"
	    : "+r" (ras_start), "=r" (start), "+r" (address), "+r" (setmask)
	    : : "memory");
}

static __inline void
atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
{
	int start, ras_start = ARM_RAS_START;

	__asm __volatile("1:\n"
	    "adr	%1, 1b\n"
	    "str	%1, [%0]\n"
	    "adr	%1, 2f\n"
	    "str	%1, [%0, #4]\n"
	    "ldr	%1, [%2]\n"
	    "sub	%1, %1, %3\n"
	    "str	%1, [%2]\n"
	    "2:\n"
	    "mov	%1, #0\n"
	    "str	%1, [%0]\n"
	    "mov	%1, #0xffffffff\n"
	    "str	%1, [%0, #4]\n"
	    : "+r" (ras_start), "=r" (start), "+r" (p), "+r" (val)
	    : : "memory");
}

#endif /* _KERNEL */

static __inline uint32_t
atomic_readandclear_32(volatile u_int32_t *p)
{

	return (__swp(0, p));
}

static __inline uint32_t
atomic_swap_32(volatile u_int32_t *p, u_int32_t v)
{

	return (__swp(v, p));
}
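
/*
 * Example (illustrative sketch only; "lockword", "example_lock" and
 * "example_unlock" are hypothetical): the SWP-based swap is enough to
 * build a simple test-and-set spinlock even on ARMv5, by atomically
 * exchanging 1 into the lock word and inspecting the previous value.
 *
 *	static volatile uint32_t lockword;
 *
 *	static __inline void
 *	example_lock(void)
 *	{
 *		while (atomic_swap_32(&lockword, 1) != 0)
 *			;	// spin until the old value was 0 (free)
 *	}
 *
 *	static __inline void
 *	example_unlock(void)
 *	{
 *		atomic_swap_32(&lockword, 0);
 *	}
 */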

#define atomic_fcmpset_rel_32	atomic_fcmpset_32
#define atomic_fcmpset_acq_32	atomic_fcmpset_32
#ifdef _KERNEL
#define atomic_fcmpset_rel_64	atomic_fcmpset_64
#define atomic_fcmpset_acq_64	atomic_fcmpset_64
#endif
#define atomic_fcmpset_acq_long	atomic_fcmpset_long
#define atomic_fcmpset_rel_long	atomic_fcmpset_long
#define atomic_cmpset_rel_32	atomic_cmpset_32
#define atomic_cmpset_acq_32	atomic_cmpset_32
#ifdef _KERNEL
#define atomic_cmpset_rel_64	atomic_cmpset_64
#define atomic_cmpset_acq_64	atomic_cmpset_64
#endif
#define atomic_set_rel_32	atomic_set_32
#define atomic_set_acq_32	atomic_set_32
#define atomic_clear_rel_32	atomic_clear_32
#define atomic_clear_acq_32	atomic_clear_32
#define atomic_add_rel_32	atomic_add_32
#define atomic_add_acq_32	atomic_add_32
#define atomic_subtract_rel_32	atomic_subtract_32
#define atomic_subtract_acq_32	atomic_subtract_32
#define atomic_store_rel_32	atomic_store_32
#define atomic_store_rel_long	atomic_store_long
#define atomic_load_acq_32	atomic_load_32
#define atomic_load_acq_long	atomic_load_long
#define atomic_add_acq_long	atomic_add_long
#define atomic_add_rel_long	atomic_add_long
#define atomic_subtract_acq_long	atomic_subtract_long
#define atomic_subtract_rel_long	atomic_subtract_long
#define atomic_clear_acq_long	atomic_clear_long
#define atomic_clear_rel_long	atomic_clear_long
#define atomic_set_acq_long	atomic_set_long
#define atomic_set_rel_long	atomic_set_long
#define atomic_cmpset_acq_long	atomic_cmpset_long
#define atomic_cmpset_rel_long	atomic_cmpset_long
#undef __with_interrupts_disabled

static __inline void
atomic_add_long(volatile u_long *p, u_long v)
{

	atomic_add_32((volatile uint32_t *)p, v);
}

static __inline void
atomic_clear_long(volatile u_long *p, u_long v)
{

	atomic_clear_32((volatile uint32_t *)p, v);
}

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long old, u_long newe)
{

	return (atomic_cmpset_32((volatile uint32_t *)dst, old, newe));
}

static __inline int
atomic_fcmpset_long(volatile u_long *dst, u_long *old, u_long newe)
{

	return (atomic_fcmpset_32((volatile uint32_t *)dst,
	    (uint32_t *)old, newe));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	return (atomic_fetchadd_32((volatile uint32_t *)p, v));
}

static __inline u_long
atomic_readandclear_long(volatile u_long *p)
{

	return (atomic_readandclear_32((volatile uint32_t *)p));
}

static __inline void
atomic_set_long(volatile u_long *p, u_long v)
{

	atomic_set_32((volatile uint32_t *)p, v);
}

static __inline void
atomic_subtract_long(volatile u_long *p, u_long v)
{

	atomic_subtract_32((volatile uint32_t *)p, v);
}

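/*
 * On 32-bit ARM, u_long and uint32_t have the same size and
 * representation, so the long-sized wrappers above can safely delegate
 * to their 32-bit counterparts through pointer casts.
 */
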
/*
 * ARMv5 does not support SMP.  In both kernel and user mode, a compiler
 * barrier is all that is needed for fences, since a single CPU is always
 * self-consistent.
 */
static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__compiler_membar();
}

#endif /* _MACHINE_ATOMIC_V4_H_ */