FreeBSD/Linux Kernel Cross Reference
sys/mips/include/atomic.h


/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: src/sys/alpha/include/atomic.h,v 1.21.2.3 2005/10/06 18:12:05 jhb
 * $FreeBSD: releng/12.0/sys/mips/include/atomic.h 327097 2017-12-22 23:27:03Z kib $
 */

#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#include <sys/atomic_common.h>

/*
 * Note: all the 64-bit atomic operations are atomic only when running
 * in 64-bit mode.  It is assumed that code compiled for n32 and n64
 * fits into this definition and that no further safeties are needed.
 *
 * It is also assumed that the add, subtract and other arithmetic is
 * done on numbers, not pointers.  No atomic operations are defined for
 * pointers under the special n32 rules, but such pointers generally
 * should not need them.
 */
#ifndef __MIPS_PLATFORM_SYNC_NOPS
#define __MIPS_PLATFORM_SYNC_NOPS ""
#endif

static __inline  void
mips_sync(void)
{
        __asm __volatile (".set noreorder\n"
                        "\tsync\n"
                        __MIPS_PLATFORM_SYNC_NOPS
                        ".set reorder\n"
                        : : : "memory");
}

#define mb()    mips_sync()
#define wmb()   mips_sync()
#define rmb()   mips_sync()

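/*
 * Editorial example (not part of the original header): a minimal sketch of
 * pairing wmb() with rmb() for a producer/consumer hand-off.  The variable
 * and function names here are hypothetical.
 */
#if 0   /* illustration only */
static volatile uint32_t example_data, example_ready;

static __inline void
example_producer(uint32_t v)
{
        example_data = v;
        wmb();                  /* order the data store before the flag store */
        example_ready = 1;
}

static __inline uint32_t
example_consumer(void)
{
        while (example_ready == 0)
                ;               /* spin until the producer publishes */
        rmb();                  /* order the flag load before the data load */
        return (example_data);
}
#endif
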
/*
 * Various simple arithmetic on memory which is atomic in the presence
 * of interrupts and SMP safe.
 */

void atomic_set_8(__volatile uint8_t *, uint8_t);
void atomic_clear_8(__volatile uint8_t *, uint8_t);
void atomic_add_8(__volatile uint8_t *, uint8_t);
void atomic_subtract_8(__volatile uint8_t *, uint8_t);

void atomic_set_16(__volatile uint16_t *, uint16_t);
void atomic_clear_16(__volatile uint16_t *, uint16_t);
void atomic_add_16(__volatile uint16_t *, uint16_t);
void atomic_subtract_16(__volatile uint16_t *, uint16_t);

static __inline void
atomic_set_32(__volatile uint32_t *p, uint32_t v)
{
        uint32_t temp;

        __asm __volatile (
                "1:\tll %0, %3\n\t"             /* load old value */
                "or     %0, %2, %0\n\t"         /* calculate new value */
                "sc     %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* spin if failed */
                : "=&r" (temp), "=m" (*p)
                : "r" (v), "m" (*p)
                : "memory");
}

static __inline void
atomic_clear_32(__volatile uint32_t *p, uint32_t v)
{
        uint32_t temp;
        v = ~v;

        __asm __volatile (
                "1:\tll %0, %3\n\t"             /* load old value */
                "and    %0, %2, %0\n\t"         /* calculate new value */
                "sc     %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* spin if failed */
                : "=&r" (temp), "=m" (*p)
                : "r" (v), "m" (*p)
                : "memory");
}

static __inline void
atomic_add_32(__volatile uint32_t *p, uint32_t v)
{
        uint32_t temp;

        __asm __volatile (
                "1:\tll %0, %3\n\t"             /* load old value */
                "addu   %0, %2, %0\n\t"         /* calculate new value */
                "sc     %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* spin if failed */
                : "=&r" (temp), "=m" (*p)
                : "r" (v), "m" (*p)
                : "memory");
}

static __inline void
atomic_subtract_32(__volatile uint32_t *p, uint32_t v)
{
        uint32_t temp;

        __asm __volatile (
                "1:\tll %0, %3\n\t"             /* load old value */
                "subu   %0, %2\n\t"             /* calculate new value */
                "sc     %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* spin if failed */
                : "=&r" (temp), "=m" (*p)
                : "r" (v), "m" (*p)
                : "memory");
}

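/*
 * Editorial example (not part of the original header): a sketch of using
 * the relaxed 32-bit ops on a shared flags word.  Each call is a single
 * ll/sc read-modify-write.  The flag names are hypothetical.
 */
#if 0   /* illustration only */
#define EXAMPLE_F_BUSY  0x01
#define EXAMPLE_F_DIRTY 0x02

static volatile uint32_t example_flags;

static __inline void
example_mark_busy_dirty(void)
{
        atomic_set_32(&example_flags, EXAMPLE_F_BUSY | EXAMPLE_F_DIRTY);
}

static __inline void
example_clear_busy(void)
{
        atomic_clear_32(&example_flags, EXAMPLE_F_BUSY);
}
#endif
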
static __inline uint32_t
atomic_readandclear_32(__volatile uint32_t *addr)
{
        uint32_t result, temp;

        __asm __volatile (
                "1:\tll  %0,%3\n\t"     /* load current value, asserting lock */
                "li      %1,0\n\t"              /* value to store */
                "sc      %1,%2\n\t"     /* attempt to store */
                "beqz    %1, 1b\n\t"            /* if the store failed, spin */
                : "=&r"(result), "=&r"(temp), "=m" (*addr)
                : "m" (*addr)
                : "memory");

        return (result);
}

static __inline uint32_t
atomic_readandset_32(__volatile uint32_t *addr, uint32_t value)
{
        uint32_t result, temp;

        __asm __volatile (
                "1:\tll  %0,%3\n\t"     /* load current value, asserting lock */
                "or      %1,$0,%4\n\t"          /* value to store */
                "sc      %1,%2\n\t"     /* attempt to store */
                "beqz    %1, 1b\n\t"            /* if the store failed, spin */
                : "=&r"(result), "=&r"(temp), "=m" (*addr)
                : "m" (*addr), "r" (value)
                : "memory");

        return (result);
}

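/*
 * Editorial example (not part of the original header): a sketch of draining
 * a pending-events mask with atomic_readandclear_32(), so events posted
 * concurrently via atomic_set_32() are never lost.  Names are hypothetical.
 */
#if 0   /* illustration only */
static volatile uint32_t example_pending;

static __inline void
example_post_event(uint32_t bit)
{
        atomic_set_32(&example_pending, bit);
}

static __inline uint32_t
example_drain_events(void)
{
        /* Returns the old mask and leaves zero behind, in one ll/sc loop. */
        return (atomic_readandclear_32(&example_pending));
}
#endif
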
#if defined(__mips_n64) || defined(__mips_n32)
static __inline void
atomic_set_64(__volatile uint64_t *p, uint64_t v)
{
        uint64_t temp;

        __asm __volatile (
                "1:\n\t"
                "lld    %0, %3\n\t"             /* load old value */
                "or     %0, %2, %0\n\t"         /* calculate new value */
                "scd    %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* spin if failed */
                : "=&r" (temp), "=m" (*p)
                : "r" (v), "m" (*p)
                : "memory");
}

static __inline void
atomic_clear_64(__volatile uint64_t *p, uint64_t v)
{
        uint64_t temp;
        v = ~v;

        __asm __volatile (
                "1:\n\t"
                "lld    %0, %3\n\t"             /* load old value */
                "and    %0, %2, %0\n\t"         /* calculate new value */
                "scd    %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* spin if failed */
                : "=&r" (temp), "=m" (*p)
                : "r" (v), "m" (*p)
                : "memory");
}

static __inline void
atomic_add_64(__volatile uint64_t *p, uint64_t v)
{
        uint64_t temp;

        __asm __volatile (
                "1:\n\t"
                "lld    %0, %3\n\t"             /* load old value */
                "daddu  %0, %2, %0\n\t"         /* calculate new value */
                "scd    %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* spin if failed */
                : "=&r" (temp), "=m" (*p)
                : "r" (v), "m" (*p)
                : "memory");
}

static __inline void
atomic_subtract_64(__volatile uint64_t *p, uint64_t v)
{
        uint64_t temp;

        __asm __volatile (
                "1:\n\t"
                "lld    %0, %3\n\t"             /* load old value */
                "dsubu  %0, %2\n\t"             /* calculate new value */
                "scd    %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* spin if failed */
                : "=&r" (temp), "=m" (*p)
                : "r" (v), "m" (*p)
                : "memory");
}

static __inline uint64_t
atomic_readandclear_64(__volatile uint64_t *addr)
{
        uint64_t result, temp;

        __asm __volatile (
                "1:\n\t"
                "lld     %0, %3\n\t"            /* load old value */
                "li      %1, 0\n\t"             /* value to store */
                "scd     %1, %2\n\t"            /* attempt to store */
                "beqz    %1, 1b\n\t"            /* if the store failed, spin */
                : "=&r"(result), "=&r"(temp), "=m" (*addr)
                : "m" (*addr)
                : "memory");

        return (result);
}

static __inline uint64_t
atomic_readandset_64(__volatile uint64_t *addr, uint64_t value)
{
        uint64_t result, temp;

        __asm __volatile (
                "1:\n\t"
                "lld     %0,%3\n\t"             /* load old value */
                "or      %1,$0,%4\n\t"          /* value to store */
                "scd     %1,%2\n\t"             /* attempt to store */
                "beqz    %1, 1b\n\t"            /* if the store failed, spin */
                : "=&r"(result), "=&r"(temp), "=m" (*addr)
                : "m" (*addr), "r" (value)
                : "memory");

        return (result);
}
#endif

#define ATOMIC_ACQ_REL(NAME, WIDTH)                                     \
static __inline  void                                                   \
atomic_##NAME##_acq_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{                                                                       \
        atomic_##NAME##_##WIDTH(p, v);                                  \
        mips_sync();                                                    \
}                                                                       \
                                                                        \
static __inline  void                                                   \
atomic_##NAME##_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{                                                                       \
        mips_sync();                                                    \
        atomic_##NAME##_##WIDTH(p, v);                                  \
}

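/*
 * Editorial note: for instance, ATOMIC_ACQ_REL(set, 32) below expands to
 * roughly the following pair (a sketch; the real text comes from the
 * macro above).  The acquire variant issues the barrier after the
 * operation, the release variant before it:
 *
 *      static __inline  void
 *      atomic_set_acq_32(__volatile uint32_t *p, uint32_t v)
 *      {
 *              atomic_set_32(p, v);
 *              mips_sync();
 *      }
 *
 *      static __inline  void
 *      atomic_set_rel_32(__volatile uint32_t *p, uint32_t v)
 *      {
 *              mips_sync();
 *              atomic_set_32(p, v);
 *      }
 */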
/* Variants of simple arithmetic with memory barriers. */
ATOMIC_ACQ_REL(set, 8)
ATOMIC_ACQ_REL(clear, 8)
ATOMIC_ACQ_REL(add, 8)
ATOMIC_ACQ_REL(subtract, 8)
ATOMIC_ACQ_REL(set, 16)
ATOMIC_ACQ_REL(clear, 16)
ATOMIC_ACQ_REL(add, 16)
ATOMIC_ACQ_REL(subtract, 16)
ATOMIC_ACQ_REL(set, 32)
ATOMIC_ACQ_REL(clear, 32)
ATOMIC_ACQ_REL(add, 32)
ATOMIC_ACQ_REL(subtract, 32)
#if defined(__mips_n64) || defined(__mips_n32)
ATOMIC_ACQ_REL(set, 64)
ATOMIC_ACQ_REL(clear, 64)
ATOMIC_ACQ_REL(add, 64)
ATOMIC_ACQ_REL(subtract, 64)
#endif

#undef ATOMIC_ACQ_REL

/*
 * We assume that a = b will do atomic loads and stores.
 */
#define ATOMIC_STORE_LOAD(WIDTH)                        \
static __inline  uint##WIDTH##_t                        \
atomic_load_acq_##WIDTH(__volatile uint##WIDTH##_t *p)  \
{                                                       \
        uint##WIDTH##_t v;                              \
                                                        \
        v = *p;                                         \
        mips_sync();                                    \
        return (v);                                     \
}                                                       \
                                                        \
static __inline  void                                   \
atomic_store_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{                                                       \
        mips_sync();                                    \
        *p = v;                                         \
}

ATOMIC_STORE_LOAD(32)
ATOMIC_STORE_LOAD(64)
#undef ATOMIC_STORE_LOAD

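/*
 * Editorial example (not part of the original header): a sketch of the
 * usual release/acquire publication idiom built from the pair above.
 * Names are hypothetical.
 */
#if 0   /* illustration only */
static volatile uint32_t example_payload;
static volatile uint32_t example_published;

static __inline void
example_publish(uint32_t v)
{
        example_payload = v;
        /* Release: the payload store is ordered before the flag store. */
        atomic_store_rel_32(&example_published, 1);
}

static __inline int
example_try_consume(uint32_t *out)
{
        /* Acquire: the flag load is ordered before the payload load. */
        if (atomic_load_acq_32(&example_published) == 0)
                return (0);
        *out = example_payload;
        return (1);
}
#endif
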
/*
 * Atomically compare the value stored at *p with cmpval and if the
 * two values are equal, update the value of *p with newval. Returns
 * zero if the compare failed, nonzero otherwise.
 */
static __inline uint32_t
atomic_cmpset_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
        uint32_t ret;

        __asm __volatile (
                "1:\tll %0, %4\n\t"             /* load old value */
                "bne %0, %2, 2f\n\t"            /* compare */
                "move %0, %3\n\t"               /* value to store */
                "sc %0, %1\n\t"                 /* attempt to store */
                "beqz %0, 1b\n\t"               /* if it failed, spin */
                "j 3f\n\t"
                "2:\n\t"
                "li     %0, 0\n\t"
                "3:\n"
                : "=&r" (ret), "=m" (*p)
                : "r" (cmpval), "r" (newval), "m" (*p)
                : "memory");

        return (ret);
}

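/*
 * Editorial example (not part of the original header): a sketch of the
 * classic compare-and-set retry loop, here computing a saturating
 * increment.  The function name is hypothetical.
 */
#if 0   /* illustration only */
static __inline uint32_t
example_saturating_inc(__volatile uint32_t *p)
{
        uint32_t old;

        do {
                old = *p;
                if (old == 0xffffffffU)
                        return (old);   /* already saturated */
        } while (atomic_cmpset_32(p, old, old + 1) == 0);
        return (old + 1);
}
#endif
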
/*
 * Same as atomic_cmpset_32(), but with an acquire barrier after the
 * operation.
 */
static __inline uint32_t
atomic_cmpset_acq_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
        int retval;

        retval = atomic_cmpset_32(p, cmpval, newval);
        mips_sync();
        return (retval);
}

static __inline uint32_t
atomic_cmpset_rel_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
        mips_sync();
        return (atomic_cmpset_32(p, cmpval, newval));
}

/*
 * Like atomic_cmpset_32(), but on failure the observed value of *p is
 * written back to *cmpval.  Returns zero if the compare failed,
 * nonzero otherwise.
 */
static __inline uint32_t
atomic_fcmpset_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
{
        uint32_t ret;

        __asm __volatile (
                "1:\n\t"
                "ll     %0, %1\n\t"             /* load old value */
                "bne    %0, %4, 2f\n\t"         /* compare */
                "move   %0, %3\n\t"             /* value to store */
                "sc     %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* if it failed, spin */
                "j      3f\n\t"
                "2:\n\t"
                "sw     %0, %2\n\t"             /* save old value */
                "li     %0, 0\n\t"
                "3:\n"
                : "=&r" (ret), "+m" (*p), "=m" (*cmpval)
                : "r" (newval), "r" (*cmpval)
                : "memory");
        return (ret);
}

static __inline uint32_t
atomic_fcmpset_acq_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
{
        int retval;

        retval = atomic_fcmpset_32(p, cmpval, newval);
        mips_sync();
        return (retval);
}

static __inline uint32_t
atomic_fcmpset_rel_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
{
        mips_sync();
        return (atomic_fcmpset_32(p, cmpval, newval));
}

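/*
 * Editorial example (not part of the original header): the same kind of
 * loop with atomic_fcmpset_32(), which reloads *cmpval with the observed
 * value on failure and so avoids an explicit re-read each iteration.
 * The function name is hypothetical.
 */
#if 0   /* illustration only */
static __inline uint32_t
example_fetch_or(__volatile uint32_t *p, uint32_t bits)
{
        uint32_t old;

        old = *p;
        while (atomic_fcmpset_32(p, &old, old | bits) == 0)
                ;       /* `old' now holds the freshly observed value */
        return (old);
}
#endif
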
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline uint32_t
atomic_fetchadd_32(__volatile uint32_t *p, uint32_t v)
{
        uint32_t value, temp;

        __asm __volatile (
                "1:\tll %0, %1\n\t"             /* load old value */
                "addu %2, %3, %0\n\t"           /* calculate new value */
                "sc %2, %1\n\t"                 /* attempt to store */
                "beqz %2, 1b\n\t"               /* spin if failed */
                : "=&r" (value), "=m" (*p), "=&r" (temp)
                : "r" (v), "m" (*p));
        return (value);
}

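/*
 * Editorial example (not part of the original header): a sketch of a
 * ticket counter built on atomic_fetchadd_32(); each caller receives a
 * unique, monotonically increasing ticket.  Names are hypothetical.
 */
#if 0   /* illustration only */
static volatile uint32_t example_next_ticket;

static __inline uint32_t
example_take_ticket(void)
{
        /* Returns the pre-increment value, so tickets start at 0. */
        return (atomic_fetchadd_32(&example_next_ticket, 1));
}
#endif
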
#if defined(__mips_n64) || defined(__mips_n32)
/*
 * Atomically compare the value stored at *p with cmpval and if the
 * two values are equal, update the value of *p with newval. Returns
 * zero if the compare failed, nonzero otherwise.
 */
static __inline uint64_t
atomic_cmpset_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
        uint64_t ret;

        __asm __volatile (
                "1:\n\t"
                "lld    %0, %4\n\t"             /* load old value */
                "bne    %0, %2, 2f\n\t"         /* compare */
                "move   %0, %3\n\t"             /* value to store */
                "scd    %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* if it failed, spin */
                "j      3f\n\t"
                "2:\n\t"
                "li     %0, 0\n\t"
                "3:\n"
                : "=&r" (ret), "=m" (*p)
                : "r" (cmpval), "r" (newval), "m" (*p)
                : "memory");

        return (ret);
}

/*
 * Same as atomic_cmpset_64(), but with an acquire barrier after the
 * operation.
 */
static __inline uint64_t
atomic_cmpset_acq_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
        int retval;

        retval = atomic_cmpset_64(p, cmpval, newval);
        mips_sync();
        return (retval);
}

static __inline uint64_t
atomic_cmpset_rel_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
        mips_sync();
        return (atomic_cmpset_64(p, cmpval, newval));
}

/*
 * Like atomic_cmpset_64(), but on failure the observed value of *p is
 * written back to *cmpval.  Returns zero if the compare failed,
 * nonzero otherwise.
 */
static __inline uint32_t
atomic_fcmpset_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
{
        uint32_t ret;

        __asm __volatile (
                "1:\n\t"
                "lld    %0, %1\n\t"             /* load old value */
                "bne    %0, %4, 2f\n\t"         /* compare */
                "move   %0, %3\n\t"             /* value to store */
                "scd    %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* if it failed, spin */
                "j      3f\n\t"
                "2:\n\t"
                "sd     %0, %2\n\t"             /* save old value */
                "li     %0, 0\n\t"
                "3:\n"
                : "=&r" (ret), "+m" (*p), "=m" (*cmpval)
                : "r" (newval), "r" (*cmpval)
                : "memory");

        return (ret);
}

static __inline uint64_t
atomic_fcmpset_acq_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
{
        int retval;

        retval = atomic_fcmpset_64(p, cmpval, newval);
        mips_sync();
        return (retval);
}

static __inline uint64_t
atomic_fcmpset_rel_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
{
        mips_sync();
        return (atomic_fcmpset_64(p, cmpval, newval));
}

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline uint64_t
atomic_fetchadd_64(__volatile uint64_t *p, uint64_t v)
{
        uint64_t value, temp;

        __asm __volatile (
                "1:\n\t"
                "lld    %0, %1\n\t"             /* load old value */
                "daddu  %2, %3, %0\n\t"         /* calculate new value */
                "scd    %2, %1\n\t"             /* attempt to store */
                "beqz   %2, 1b\n\t"             /* spin if failed */
                : "=&r" (value), "=m" (*p), "=&r" (temp)
                : "r" (v), "m" (*p));
        return (value);
}
#endif

static __inline void
atomic_thread_fence_acq(void)
{

        mips_sync();
}

static __inline void
atomic_thread_fence_rel(void)
{

        mips_sync();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

        mips_sync();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

        mips_sync();
}

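/*
 * Editorial example (not part of the original header): a sketch of using a
 * stand-alone fence with otherwise plain stores; on MIPS every fence
 * flavor above is the full mips_sync().  Names are hypothetical.
 */
#if 0   /* illustration only */
static volatile uint32_t example_a, example_b;

static __inline void
example_ordered_stores(void)
{
        example_a = 1;
        atomic_thread_fence_rel();      /* order the store to a before b */
        example_b = 1;
}
#endif
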
/* Operations on chars. */
#define atomic_set_char         atomic_set_8
#define atomic_set_acq_char     atomic_set_acq_8
#define atomic_set_rel_char     atomic_set_rel_8
#define atomic_clear_char       atomic_clear_8
#define atomic_clear_acq_char   atomic_clear_acq_8
#define atomic_clear_rel_char   atomic_clear_rel_8
#define atomic_add_char         atomic_add_8
#define atomic_add_acq_char     atomic_add_acq_8
#define atomic_add_rel_char     atomic_add_rel_8
#define atomic_subtract_char    atomic_subtract_8
#define atomic_subtract_acq_char        atomic_subtract_acq_8
#define atomic_subtract_rel_char        atomic_subtract_rel_8

/* Operations on shorts. */
#define atomic_set_short        atomic_set_16
#define atomic_set_acq_short    atomic_set_acq_16
#define atomic_set_rel_short    atomic_set_rel_16
#define atomic_clear_short      atomic_clear_16
#define atomic_clear_acq_short  atomic_clear_acq_16
#define atomic_clear_rel_short  atomic_clear_rel_16
#define atomic_add_short        atomic_add_16
#define atomic_add_acq_short    atomic_add_acq_16
#define atomic_add_rel_short    atomic_add_rel_16
#define atomic_subtract_short   atomic_subtract_16
#define atomic_subtract_acq_short       atomic_subtract_acq_16
#define atomic_subtract_rel_short       atomic_subtract_rel_16

/* Operations on ints. */
#define atomic_set_int          atomic_set_32
#define atomic_set_acq_int      atomic_set_acq_32
#define atomic_set_rel_int      atomic_set_rel_32
#define atomic_clear_int        atomic_clear_32
#define atomic_clear_acq_int    atomic_clear_acq_32
#define atomic_clear_rel_int    atomic_clear_rel_32
#define atomic_add_int          atomic_add_32
#define atomic_add_acq_int      atomic_add_acq_32
#define atomic_add_rel_int      atomic_add_rel_32
#define atomic_subtract_int     atomic_subtract_32
#define atomic_subtract_acq_int atomic_subtract_acq_32
#define atomic_subtract_rel_int atomic_subtract_rel_32
#define atomic_cmpset_int       atomic_cmpset_32
#define atomic_cmpset_acq_int   atomic_cmpset_acq_32
#define atomic_cmpset_rel_int   atomic_cmpset_rel_32
#define atomic_fcmpset_int      atomic_fcmpset_32
#define atomic_fcmpset_acq_int  atomic_fcmpset_acq_32
#define atomic_fcmpset_rel_int  atomic_fcmpset_rel_32
#define atomic_load_acq_int     atomic_load_acq_32
#define atomic_store_rel_int    atomic_store_rel_32
#define atomic_readandclear_int atomic_readandclear_32
#define atomic_readandset_int   atomic_readandset_32
#define atomic_fetchadd_int     atomic_fetchadd_32

/*
 * I think the following is right, even for n32.  For n32 the pointers
 * are still 32 bits, so we need to operate on them as 32-bit quantities,
 * even though they are sign extended in operation.  For longs, there's
 * no question because they are always 32 bits.
 */
#ifdef __mips_n64
/* Operations on longs. */
#define atomic_set_long         atomic_set_64
#define atomic_set_acq_long     atomic_set_acq_64
#define atomic_set_rel_long     atomic_set_rel_64
#define atomic_clear_long       atomic_clear_64
#define atomic_clear_acq_long   atomic_clear_acq_64
#define atomic_clear_rel_long   atomic_clear_rel_64
#define atomic_add_long         atomic_add_64
#define atomic_add_acq_long     atomic_add_acq_64
#define atomic_add_rel_long     atomic_add_rel_64
#define atomic_subtract_long    atomic_subtract_64
#define atomic_subtract_acq_long        atomic_subtract_acq_64
#define atomic_subtract_rel_long        atomic_subtract_rel_64
#define atomic_cmpset_long      atomic_cmpset_64
#define atomic_cmpset_acq_long  atomic_cmpset_acq_64
#define atomic_cmpset_rel_long  atomic_cmpset_rel_64
#define atomic_fcmpset_long     atomic_fcmpset_64
#define atomic_fcmpset_acq_long atomic_fcmpset_acq_64
#define atomic_fcmpset_rel_long atomic_fcmpset_rel_64
#define atomic_load_acq_long    atomic_load_acq_64
#define atomic_store_rel_long   atomic_store_rel_64
#define atomic_fetchadd_long    atomic_fetchadd_64
#define atomic_readandclear_long        atomic_readandclear_64

#else /* !__mips_n64 */

/* Operations on longs. */
#define atomic_set_long(p, v)                                           \
        atomic_set_32((volatile u_int *)(p), (u_int)(v))
#define atomic_set_acq_long(p, v)                                       \
        atomic_set_acq_32((volatile u_int *)(p), (u_int)(v))
#define atomic_set_rel_long(p, v)                                       \
        atomic_set_rel_32((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_long(p, v)                                         \
        atomic_clear_32((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_acq_long(p, v)                                     \
        atomic_clear_acq_32((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_rel_long(p, v)                                     \
        atomic_clear_rel_32((volatile u_int *)(p), (u_int)(v))
#define atomic_add_long(p, v)                                           \
        atomic_add_32((volatile u_int *)(p), (u_int)(v))
#define atomic_add_acq_long(p, v)                                       \
        atomic_add_acq_32((volatile u_int *)(p), (u_int)(v))
#define atomic_add_rel_long(p, v)                                       \
        atomic_add_rel_32((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_long(p, v)                                      \
        atomic_subtract_32((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_acq_long(p, v)                                  \
        atomic_subtract_acq_32((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_rel_long(p, v)                                  \
        atomic_subtract_rel_32((volatile u_int *)(p), (u_int)(v))
#define atomic_cmpset_long(p, cmpval, newval)                           \
        atomic_cmpset_32((volatile u_int *)(p), (u_int)(cmpval),        \
            (u_int)(newval))
#define atomic_cmpset_acq_long(p, cmpval, newval)                       \
        atomic_cmpset_acq_32((volatile u_int *)(p), (u_int)(cmpval),    \
            (u_int)(newval))
#define atomic_cmpset_rel_long(p, cmpval, newval)                       \
        atomic_cmpset_rel_32((volatile u_int *)(p), (u_int)(cmpval),    \
            (u_int)(newval))
#define atomic_fcmpset_long(p, cmpval, newval)                          \
        atomic_fcmpset_32((volatile u_int *)(p), (u_int *)(cmpval),     \
            (u_int)(newval))
#define atomic_fcmpset_acq_long(p, cmpval, newval)                      \
        atomic_fcmpset_acq_32((volatile u_int *)(p), (u_int *)(cmpval), \
            (u_int)(newval))
#define atomic_fcmpset_rel_long(p, cmpval, newval)                      \
        atomic_fcmpset_rel_32((volatile u_int *)(p), (u_int *)(cmpval), \
            (u_int)(newval))
#define atomic_load_acq_long(p)                                         \
        (u_long)atomic_load_acq_32((volatile u_int *)(p))
#define atomic_store_rel_long(p, v)                                     \
        atomic_store_rel_32((volatile u_int *)(p), (u_int)(v))
#define atomic_fetchadd_long(p, v)                                      \
        atomic_fetchadd_32((volatile u_int *)(p), (u_int)(v))
#define atomic_readandclear_long(p)                                     \
        atomic_readandclear_32((volatile u_int *)(p))

#endif /* __mips_n64 */

/* Operations on pointers. */
#define atomic_set_ptr          atomic_set_long
#define atomic_set_acq_ptr      atomic_set_acq_long
#define atomic_set_rel_ptr      atomic_set_rel_long
#define atomic_clear_ptr        atomic_clear_long
#define atomic_clear_acq_ptr    atomic_clear_acq_long
#define atomic_clear_rel_ptr    atomic_clear_rel_long
#define atomic_add_ptr          atomic_add_long
#define atomic_add_acq_ptr      atomic_add_acq_long
#define atomic_add_rel_ptr      atomic_add_rel_long
#define atomic_subtract_ptr     atomic_subtract_long
#define atomic_subtract_acq_ptr atomic_subtract_acq_long
#define atomic_subtract_rel_ptr atomic_subtract_rel_long
#define atomic_cmpset_ptr       atomic_cmpset_long
#define atomic_cmpset_acq_ptr   atomic_cmpset_acq_long
#define atomic_cmpset_rel_ptr   atomic_cmpset_rel_long
#define atomic_fcmpset_ptr      atomic_fcmpset_long
#define atomic_fcmpset_acq_ptr  atomic_fcmpset_acq_long
#define atomic_fcmpset_rel_ptr  atomic_fcmpset_rel_long
#define atomic_load_acq_ptr     atomic_load_acq_long
#define atomic_store_rel_ptr    atomic_store_rel_long
#define atomic_readandclear_ptr atomic_readandclear_long

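/*
 * Editorial example (not part of the original header): a sketch of a
 * lock-free stack push using atomic_fcmpset_ptr on the head pointer.
 * The node type and variable names are hypothetical.
 */
#if 0   /* illustration only */
struct example_node {
        struct example_node *next;
};

static struct example_node * volatile example_head;

static __inline void
example_push(struct example_node *n)
{
        uintptr_t old;

        old = (uintptr_t)example_head;
        do {
                /* Link to the currently observed head before the swap. */
                n->next = (struct example_node *)old;
        } while (atomic_fcmpset_ptr((volatile uintptr_t *)&example_head,
            &old, (uintptr_t)n) == 0);
}
#endif
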
#endif /* ! _MACHINE_ATOMIC_H_ */
