/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2019 Kyle Evans <kevans@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _SYS__ATOMIC_SUBWORD_H_
#define	_SYS__ATOMIC_SUBWORD_H_

/*
 * This header is specifically for platforms that either have no way to do, or
 * simply do not do, sub-word atomic operations natively.  Emulating them with
 * full-word operations is not ideal: it takes a little extra effort to make
 * sure our atomic operations fail because of the bits we're actually trying
 * to write, rather than because of concurrent changes to the rest of the
 * word.
 */
#ifndef _MACHINE_ATOMIC_H_
#error do not include this header, use machine/atomic.h
#endif

#include <machine/endian.h>

#ifndef NBBY
#define	NBBY	8
#endif

#define	_ATOMIC_WORD_ALIGNED(p)		\
	(uint32_t *)((__uintptr_t)(p) - ((__uintptr_t)(p) % 4))

#if _BYTE_ORDER == _BIG_ENDIAN
#define	_ATOMIC_BYTE_SHIFT(p)		\
	((3 - ((__uintptr_t)(p) % 4)) * NBBY)

#define	_ATOMIC_HWORD_SHIFT(p)		\
	((2 - ((__uintptr_t)(p) % 4)) * NBBY)
#else
#define	_ATOMIC_BYTE_SHIFT(p)		\
	((((__uintptr_t)(p) % 4)) * NBBY)

#define	_ATOMIC_HWORD_SHIFT(p)		\
	((((__uintptr_t)(p) % 4)) * NBBY)
#endif
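
/*
 * Worked example (addresses hypothetical, for illustration only): for a
 * uint8_t at address 0x1003, _ATOMIC_WORD_ALIGNED yields (uint32_t *)0x1000.
 * On a big-endian machine that byte is the least significant byte of the
 * containing word, so _ATOMIC_BYTE_SHIFT == (3 - 3) * NBBY == 0; on a
 * little-endian machine it is 3 * NBBY == 24.  Likewise, a uint16_t at
 * 0x1002 shifts by (2 - 2) * NBBY == 0 on big-endian and 2 * NBBY == 16 on
 * little-endian.
 */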

#ifndef _atomic_cmpset_masked_word
/*
 * Pass these bad boys a couple words and a mask of the bits you care about,
 * they'll loop until we either succeed or fail because of those bits rather
 * than the ones we're not masking.  old and val should already be preshifted
 * to the proper position.
 */
static __inline int
_atomic_cmpset_masked_word(uint32_t *addr, uint32_t old, uint32_t val,
    uint32_t mask)
{
	int ret;
	uint32_t wcomp;

	wcomp = old;

	/*
	 * We'll attempt the cmpset on the entire word.  Loop here in case the
	 * operation fails due to the other half-word resident in that word,
	 * rather than the half-word we're trying to operate on.  Ideally we
	 * only take one trip through here.  We'll have to recalculate the old
	 * value since it's the other part of the word changing.
	 */
	do {
		old = (*addr & ~mask) | wcomp;
		ret = atomic_fcmpset_32(addr, &old, (old & ~mask) | val);
	} while (ret == 0 && (old & mask) == wcomp);

	return (ret);
}
#endif
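
/*
 * Illustrative scenario (hypothetical values): suppose a caller cmpsets the
 * byte in the low lane of a word from 0x11 to 0x22 (so mask == 0x000000ff
 * and wcomp == 0x00000011) while another CPU concurrently rewrites one of
 * the other three bytes.  The atomic_fcmpset_32 above may fail and return,
 * say, old == 0xaabbcc11; because (old & mask) still equals wcomp, the loop
 * rebuilds the expected word from the fresh upper bytes and retries instead
 * of reporting a failure the caller's byte did not cause.
 */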

#ifndef _atomic_fcmpset_masked_word
static __inline int
_atomic_fcmpset_masked_word(uint32_t *addr, uint32_t *old, uint32_t val,
    uint32_t mask)
{

	/*
	 * fcmpset_* is documented in atomic(9) to allow spurious failures
	 * where *old == val on ll/sc architectures because the sc may fail
	 * due to parallel writes or other reasons.  We take advantage of
	 * that here and only attempt once, because the caller should be
	 * compensating for that possibility.
	 */
	*old = (*addr & ~mask) | *old;
	return (atomic_fcmpset_32(addr, old, (*old & ~mask) | val));
}
#endif
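
/*
 * A compensating caller would typically look something like the sketch below
 * (names hypothetical); each failed fcmpset refreshes the expected value, so
 * a spurious failure simply costs one more trip around the loop:
 *
 *	uint16_t exp, nval;
 *
 *	exp = *flagsp;
 *	do {
 *		nval = exp | SOME_FLAG;
 *	} while (atomic_fcmpset_16(flagsp, &exp, nval) == 0);
 */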

static __inline int
atomic_cmpset_8(__volatile uint8_t *addr, uint8_t old, uint8_t val)
{
	int shift;

	shift = _ATOMIC_BYTE_SHIFT(addr);

	return (_atomic_cmpset_masked_word(_ATOMIC_WORD_ALIGNED(addr),
	    old << shift, val << shift, 0xff << shift));
}

static __inline int
atomic_fcmpset_8(__volatile uint8_t *addr, uint8_t *old, uint8_t val)
{
	int ret, shift;
	uint32_t wold;

	shift = _ATOMIC_BYTE_SHIFT(addr);
	wold = *old << shift;
	ret = _atomic_fcmpset_masked_word(_ATOMIC_WORD_ALIGNED(addr),
	    &wold, val << shift, 0xff << shift);
	if (ret == 0)
		*old = (wold >> shift) & 0xff;
	return (ret);
}

static __inline int
atomic_cmpset_16(__volatile uint16_t *addr, uint16_t old, uint16_t val)
{
	int shift;

	shift = _ATOMIC_HWORD_SHIFT(addr);

	return (_atomic_cmpset_masked_word(_ATOMIC_WORD_ALIGNED(addr),
	    old << shift, val << shift, 0xffff << shift));
}

static __inline int
atomic_fcmpset_16(__volatile uint16_t *addr, uint16_t *old, uint16_t val)
{
	int ret, shift;
	uint32_t wold;

	shift = _ATOMIC_HWORD_SHIFT(addr);
	wold = *old << shift;
	ret = _atomic_fcmpset_masked_word(_ATOMIC_WORD_ALIGNED(addr),
	    &wold, val << shift, 0xffff << shift);
	if (ret == 0)
		*old = (wold >> shift) & 0xffff;
	return (ret);
}
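
/*
 * Usage sketch (all names hypothetical): atomically claim a one-byte state
 * field, failing if another thread got there first:
 *
 *	if (atomic_cmpset_8(&sc->sc_state, STATE_IDLE, STATE_BUSY) == 0)
 *		return (EBUSY);
 */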

#undef _ATOMIC_WORD_ALIGNED
#undef _ATOMIC_BYTE_SHIFT
#undef _ATOMIC_HWORD_SHIFT

#endif /* _SYS__ATOMIC_SUBWORD_H_ */