/*-
 * Copyright (c) 2013 Ed Schouten <ed@FreeBSD.org>
 * All rights reserved.
 *
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.0/sys/mips/mips/stdatomic.c 251781 2013-06-15 08:15:22Z ed $");

#include <sys/stdatomic.h>
#include <sys/types.h>

#ifdef _KERNEL
#include "opt_global.h"
#endif

#if defined(__SYNC_ATOMICS)

/*
 * Memory barriers.
 *
 * It turns out __sync_synchronize() does not emit any code when used
 * with GCC 4.2. Implement our own version that does work reliably.
 *
 * Although __sync_lock_test_and_set() should only perform an acquire
 * barrier, make it do a full barrier like the other functions. This
 * should make <stdatomic.h>'s atomic_exchange_explicit() work reliably.
 */

static inline void
do_sync(void)
{

	__asm volatile (
#if !defined(_KERNEL) || defined(SMP)
		".set noreorder\n"
		"\tsync\n"
		"\tnop\n"
		"\tnop\n"
		"\tnop\n"
		"\tnop\n"
		"\tnop\n"
		"\tnop\n"
		"\tnop\n"
		"\tnop\n"
		".set reorder\n"
#else /* _KERNEL && !SMP */
		""
#endif /* !_KERNEL || SMP */
		: : : "memory");
}
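
/*
 * Illustrative sketch (an assumption, not part of the original file):
 * given do_sync(), a working replacement for the broken
 * __sync_synchronize() could simply be
 *
 *	void
 *	__sync_synchronize(void)
 *	{
 *
 *		do_sync();
 *	}
 *
 * The emitters below instead call do_sync() directly before every
 * LL/SC sequence.
 */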

typedef union {
	uint8_t		v8[4];
	uint32_t	v32;
} reg_t;

/*
 * Given a memory address pointing to an 8-bit or 16-bit integer, return
 * the address of the 32-bit word containing it.
 */

static inline uint32_t *
round_to_word(void *ptr)
{

	return ((uint32_t *)((intptr_t)ptr & ~3));
}
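
/*
 * Worked example: for a uint16_t located at address 0x100a, the
 * expression (0x100a & ~3) yields 0x1008, i.e. the start of the
 * 32-bit word holding the halfword at byte offset 2.
 */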

/*
 * Utility functions for loading and storing 8-bit and 16-bit integers
 * in 32-bit words at an offset corresponding with the location of the
 * atomic variable.
 */

static inline void
put_1(reg_t *r, const uint8_t *offset_ptr, uint8_t val)
{
	size_t offset;

	offset = (intptr_t)offset_ptr & 3;
	r->v8[offset] = val;
}

static inline uint8_t
get_1(const reg_t *r, const uint8_t *offset_ptr)
{
	size_t offset;

	offset = (intptr_t)offset_ptr & 3;
	return (r->v8[offset]);
}

static inline void
put_2(reg_t *r, const uint16_t *offset_ptr, uint16_t val)
{
	size_t offset;
	union {
		uint16_t in;
		uint8_t out[2];
	} bytes;

	offset = (intptr_t)offset_ptr & 3;
	bytes.in = val;
	r->v8[offset] = bytes.out[0];
	r->v8[offset + 1] = bytes.out[1];
}

static inline uint16_t
get_2(const reg_t *r, const uint16_t *offset_ptr)
{
	size_t offset;
	union {
		uint8_t in[2];
		uint16_t out;
	} bytes;

	offset = (intptr_t)offset_ptr & 3;
	bytes.in[0] = r->v8[offset];
	bytes.in[1] = r->v8[offset + 1];
	return (bytes.out);
}
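
/*
 * Worked example: for the uint16_t at address 0x100a from above, the
 * offset is ((intptr_t)0x100a & 3) == 2, so put_2() copies the value's
 * two bytes into r->v8[2] and r->v8[3]. These are exactly the bytes a
 * native halfword access at 0x100a would touch, which keeps the scheme
 * correct on both big- and little-endian configurations.
 */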

/*
 * 8-bit and 16-bit routines.
 *
 * These operations are not natively supported by the CPU, so we use
 * some shifting and bitmasking on top of the 32-bit instructions.
 */

#define EMIT_LOCK_TEST_AND_SET_N(N, uintN_t) \
uintN_t \
__sync_lock_test_and_set_##N(uintN_t *mem, uintN_t val) \
{ \
	uint32_t *mem32; \
	reg_t val32, negmask, old; \
	uint32_t temp; \
\
	mem32 = round_to_word(mem); \
	val32.v32 = 0x00000000; \
	put_##N(&val32, mem, val); \
	negmask.v32 = 0xffffffff; \
	put_##N(&negmask, mem, 0); \
\
	do_sync(); \
	__asm volatile ( \
		"1:" \
		"\tll %0, %5\n" /* Load old value. */ \
		"\tand %2, %4, %0\n" /* Remove the old value. */ \
		"\tor %2, %3\n" /* Put in the new value. */ \
		"\tsc %2, %1\n" /* Attempt to store. */ \
		"\tbeqz %2, 1b\n" /* Spin if failed. */ \
		: "=&r" (old.v32), "=m" (*mem32), "=&r" (temp) \
		: "r" (val32.v32), "r" (negmask.v32), "m" (*mem32)); \
	return (get_##N(&old, mem)); \
}

EMIT_LOCK_TEST_AND_SET_N(1, uint8_t)
EMIT_LOCK_TEST_AND_SET_N(2, uint16_t)
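
/*
 * Conceptually, the emitted functions perform the following
 * read-modify-write, made atomic by the LL/SC loop (a sketch, not
 * literal code from this file):
 *
 *	old = *mem32;
 *	*mem32 = (old & negmask.v32) | val32.v32;
 *	return the N-byte slice of old at the variable's offset;
 */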

#define EMIT_VAL_COMPARE_AND_SWAP_N(N, uintN_t) \
uintN_t \
__sync_val_compare_and_swap_##N(uintN_t *mem, uintN_t expected, \
    uintN_t desired) \
{ \
	uint32_t *mem32; \
	reg_t expected32, desired32, posmask, old; \
	uint32_t negmask, temp; \
\
	mem32 = round_to_word(mem); \
	expected32.v32 = 0x00000000; \
	put_##N(&expected32, mem, expected); \
	desired32.v32 = 0x00000000; \
	put_##N(&desired32, mem, desired); \
	posmask.v32 = 0x00000000; \
	put_##N(&posmask, mem, ~0); \
	negmask = ~posmask.v32; \
\
	do_sync(); \
	__asm volatile ( \
		"1:" \
		"\tll %0, %7\n" /* Load old value. */ \
		"\tand %2, %5, %0\n" /* Isolate the old value. */ \
		"\tbne %2, %3, 2f\n" /* Compare to expected value. */ \
		"\tand %2, %6, %0\n" /* Remove the old value. */ \
		"\tor %2, %4\n" /* Put in the new value. */ \
		"\tsc %2, %1\n" /* Attempt to store. */ \
		"\tbeqz %2, 1b\n" /* Spin if failed. */ \
		"2:" \
		: "=&r" (old), "=m" (*mem32), "=&r" (temp) \
		: "r" (expected32.v32), "r" (desired32.v32), \
		  "r" (posmask.v32), "r" (negmask), "m" (*mem32)); \
	return (get_##N(&old, mem)); \
}

EMIT_VAL_COMPARE_AND_SWAP_N(1, uint8_t)
EMIT_VAL_COMPARE_AND_SWAP_N(2, uint16_t)
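
/*
 * Expressed as a plain C sketch (only atomic thanks to the LL/SC loop;
 * not literal code from this file), the comparison and swap amount to
 *
 *	old = *mem32;
 *	if ((old & posmask.v32) == expected32.v32)
 *		*mem32 = (old & negmask) | desired32.v32;
 *	return the N-byte slice of old at the variable's offset;
 *
 * On mismatch the branch to label 2 skips the store, so the caller
 * simply receives the value that was observed.
 */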

#define EMIT_ARITHMETIC_FETCH_AND_OP_N(N, uintN_t, name, op) \
uintN_t \
__sync_##name##_##N(uintN_t *mem, uintN_t val) \
{ \
	uint32_t *mem32; \
	reg_t val32, posmask, old; \
	uint32_t negmask, temp1, temp2; \
\
	mem32 = round_to_word(mem); \
	val32.v32 = 0x00000000; \
	put_##N(&val32, mem, val); \
	posmask.v32 = 0x00000000; \
	put_##N(&posmask, mem, ~0); \
	negmask = ~posmask.v32; \
\
	do_sync(); \
	__asm volatile ( \
		"1:" \
		"\tll %0, %7\n" /* Load old value. */ \
		"\t"op" %2, %0, %4\n" /* Calculate new value. */ \
		"\tand %2, %5\n" /* Isolate the new value. */ \
		"\tand %3, %6, %0\n" /* Remove the old value. */ \
		"\tor %2, %3\n" /* Put in the new value. */ \
		"\tsc %2, %1\n" /* Attempt to store. */ \
		"\tbeqz %2, 1b\n" /* Spin if failed. */ \
		: "=&r" (old.v32), "=m" (*mem32), "=&r" (temp1), \
		  "=&r" (temp2) \
		: "r" (val32.v32), "r" (posmask.v32), "r" (negmask), \
		  "m" (*mem32)); \
	return (get_##N(&old, mem)); \
}

EMIT_ARITHMETIC_FETCH_AND_OP_N(1, uint8_t, fetch_and_add, "addu")
EMIT_ARITHMETIC_FETCH_AND_OP_N(1, uint8_t, fetch_and_sub, "subu")
EMIT_ARITHMETIC_FETCH_AND_OP_N(2, uint16_t, fetch_and_add, "addu")
EMIT_ARITHMETIC_FETCH_AND_OP_N(2, uint16_t, fetch_and_sub, "subu")
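
/*
 * The full-width addu/subu may carry or borrow into the bytes next to
 * the variable; the "and %2, %5" step masks the result down to the
 * variable's own bytes before the unmodified neighbouring bytes are
 * merged back in. As a sketch (not literal code from this file):
 *
 *	old = *mem32;
 *	new = op(old, val32.v32) & posmask.v32;
 *	*mem32 = new | (old & negmask);
 */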

#define EMIT_BITWISE_FETCH_AND_OP_N(N, uintN_t, name, op, idempotence) \
uintN_t \
__sync_##name##_##N(uintN_t *mem, uintN_t val) \
{ \
	uint32_t *mem32; \
	reg_t val32, old; \
	uint32_t temp; \
\
	mem32 = round_to_word(mem); \
	val32.v32 = idempotence ? 0xffffffff : 0x00000000; \
	put_##N(&val32, mem, val); \
\
	do_sync(); \
	__asm volatile ( \
		"1:" \
		"\tll %0, %4\n" /* Load old value. */ \
		"\t"op" %2, %3, %0\n" /* Calculate new value. */ \
		"\tsc %2, %1\n" /* Attempt to store. */ \
		"\tbeqz %2, 1b\n" /* Spin if failed. */ \
		: "=&r" (old.v32), "=m" (*mem32), "=&r" (temp) \
		: "r" (val32.v32), "m" (*mem32)); \
	return (get_##N(&old, mem)); \
}

EMIT_BITWISE_FETCH_AND_OP_N(1, uint8_t, fetch_and_and, "and", 1)
EMIT_BITWISE_FETCH_AND_OP_N(1, uint8_t, fetch_and_or, "or", 0)
EMIT_BITWISE_FETCH_AND_OP_N(1, uint8_t, fetch_and_xor, "xor", 0)
EMIT_BITWISE_FETCH_AND_OP_N(2, uint16_t, fetch_and_and, "and", 1)
EMIT_BITWISE_FETCH_AND_OP_N(2, uint16_t, fetch_and_or, "or", 0)
EMIT_BITWISE_FETCH_AND_OP_N(2, uint16_t, fetch_and_xor, "xor", 0)
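
/*
 * The idempotence argument selects the padding placed around the
 * variable within val32: "and" needs the surrounding bits set
 * (x & 1 == x per bit), while "or" and "xor" need them cleared
 * (x | 0 == x, x ^ 0 == x). The padding makes the 32-bit operation a
 * no-op outside the variable itself, so unlike the arithmetic case no
 * separate masking step is needed.
 */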

/*
 * 32-bit routines.
 */

uint32_t
__sync_val_compare_and_swap_4(uint32_t *mem, uint32_t expected,
    uint32_t desired)
{
	uint32_t old, temp;

	do_sync();
	__asm volatile (
		"1:"
		"\tll %0, %5\n" /* Load old value. */
		"\tbne %0, %3, 2f\n" /* Compare to expected value. */
		"\tmove %2, %4\n" /* Value to store. */
		"\tsc %2, %1\n" /* Attempt to store. */
		"\tbeqz %2, 1b\n" /* Spin if failed. */
		"2:"
		: "=&r" (old), "=m" (*mem), "=&r" (temp)
		: "r" (expected), "r" (desired), "m" (*mem));
	return (old);
}
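
/*
 * Illustrative mapping (an assumption about how <stdatomic.h> lowers
 * onto these functions; not code from this file): a strong compare
 * and exchange such as
 *
 *	_Atomic(uint32_t) x;
 *	uint32_t exp = 0;
 *
 *	atomic_compare_exchange_strong(&x, &exp, 1);
 *
 * ends up in this routine, with success determined by comparing the
 * returned old value against the expected one.
 */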

#define EMIT_FETCH_AND_OP_4(name, op) \
uint32_t \
__sync_##name##_4(uint32_t *mem, uint32_t val) \
{ \
	uint32_t old, temp; \
\
	do_sync(); \
	__asm volatile ( \
		"1:" \
		"\tll %0, %4\n" /* Load old value. */ \
		"\t"op"\n" /* Calculate new value. */ \
		"\tsc %2, %1\n" /* Attempt to store. */ \
		"\tbeqz %2, 1b\n" /* Spin if failed. */ \
		: "=&r" (old), "=m" (*mem), "=&r" (temp) \
		: "r" (val), "m" (*mem)); \
	return (old); \
}

EMIT_FETCH_AND_OP_4(lock_test_and_set, "move %2, %3")
EMIT_FETCH_AND_OP_4(fetch_and_add, "addu %2, %0, %3")
EMIT_FETCH_AND_OP_4(fetch_and_and, "and %2, %0, %3")
EMIT_FETCH_AND_OP_4(fetch_and_or, "or %2, %0, %3")
EMIT_FETCH_AND_OP_4(fetch_and_sub, "subu %2, %0, %3")
EMIT_FETCH_AND_OP_4(fetch_and_xor, "xor %2, %0, %3")
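
/*
 * For instance, the fetch_and_add expansion above implements,
 * atomically (a sketch, not literal code from this file):
 *
 *	uint32_t
 *	__sync_fetch_and_add_4(uint32_t *mem, uint32_t val)
 *	{
 *		uint32_t old = *mem;
 *
 *		*mem = old + val;
 *		return (old);
 *	}
 */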

/*
 * 64-bit routines.
 *
 * Note: All the 64-bit atomic operations are only atomic when running
 * in 64-bit mode. It is assumed that code compiled for n32 and n64 fits
 * into this definition and no further safeties are needed.
 */

#if defined(__mips_n32) || defined(__mips_n64)

uint64_t
__sync_val_compare_and_swap_8(uint64_t *mem, uint64_t expected,
    uint64_t desired)
{
	uint64_t old, temp;

	do_sync();
	__asm volatile (
		"1:"
		"\tlld %0, %5\n" /* Load old value. */
		"\tbne %0, %3, 2f\n" /* Compare to expected value. */
		"\tmove %2, %4\n" /* Value to store. */
		"\tscd %2, %1\n" /* Attempt to store. */
		"\tbeqz %2, 1b\n" /* Spin if failed. */
		"2:"
		: "=&r" (old), "=m" (*mem), "=&r" (temp)
		: "r" (expected), "r" (desired), "m" (*mem));
	return (old);
}

#define EMIT_FETCH_AND_OP_8(name, op) \
uint64_t \
__sync_##name##_8(uint64_t *mem, uint64_t val) \
{ \
	uint64_t old, temp; \
\
	do_sync(); \
	__asm volatile ( \
		"1:" \
		"\tlld %0, %4\n" /* Load old value. */ \
		"\t"op"\n" /* Calculate new value. */ \
		"\tscd %2, %1\n" /* Attempt to store. */ \
		"\tbeqz %2, 1b\n" /* Spin if failed. */ \
		: "=&r" (old), "=m" (*mem), "=&r" (temp) \
		: "r" (val), "m" (*mem)); \
	return (old); \
}

EMIT_FETCH_AND_OP_8(lock_test_and_set, "move %2, %3")
EMIT_FETCH_AND_OP_8(fetch_and_add, "daddu %2, %0, %3")
EMIT_FETCH_AND_OP_8(fetch_and_and, "and %2, %0, %3")
EMIT_FETCH_AND_OP_8(fetch_and_or, "or %2, %0, %3")
EMIT_FETCH_AND_OP_8(fetch_and_sub, "dsubu %2, %0, %3")
EMIT_FETCH_AND_OP_8(fetch_and_xor, "xor %2, %0, %3")
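
/*
 * These expansions differ from their 32-bit counterparts only in the
 * use of lld/scd and the 64-bit daddu/dsubu arithmetic; and, or and
 * xor already operate on the full register width.
 */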

#endif /* __mips_n32 || __mips_n64 */

#endif /* __SYNC_ATOMICS */