FreeBSD/Linux Kernel Cross Reference
sys/mips/include/atomic.h


/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: src/sys/alpha/include/atomic.h,v 1.21.2.3 2005/10/06 18:12:05 jhb
 * $FreeBSD$
 */

#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#include <sys/atomic_common.h>

#if !defined(__mips_n64) && !defined(__mips_n32)
#include <sys/_atomic64e.h>
#endif

/*
 * Note: all the 64-bit atomic operations are atomic only when running
 * in 64-bit mode.  It is assumed that code compiled for n32 and n64
 * fits this definition and that no further safeties are needed.
 *
 * It is also assumed that add, subtract and the other arithmetic
 * operations are applied to numbers, not pointers.  Pointers under n32
 * follow special rules and have no atomic operations defined for them
 * here, but they generally should not need any.
 */
#ifndef __MIPS_PLATFORM_SYNC_NOPS
#define __MIPS_PLATFORM_SYNC_NOPS ""
#endif

static __inline  void
mips_sync(void)
{
        __asm __volatile (".set noreorder\n"
                        "\tsync\n"
                        __MIPS_PLATFORM_SYNC_NOPS
                        ".set reorder\n"
                        : : : "memory");
}

#define mb()    mips_sync()
#define wmb()   mips_sync()
#define rmb()   mips_sync()

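/*
 * Usage sketch (hypothetical, for illustration only): SYNC is a full
 * barrier on MIPS, which is why mb(), wmb() and rmb() are all the same
 * instruction here.  A flag-based hand-off might use them as follows;
 * all names are made up for the example.
 */
#if 0
static volatile uint32_t example_data;          /* hypothetical shared data */
static volatile uint32_t example_ready;         /* hypothetical flag */

static void
example_publish(uint32_t v)
{
        example_data = v;
        wmb();          /* order the data store before the flag store */
        example_ready = 1;
}

static uint32_t
example_consume(void)
{
        while (example_ready == 0)
                ;       /* wait for the flag */
        rmb();          /* order the flag load before the data load */
        return (example_data);
}
#endif
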
/*
 * Various simple arithmetic operations on memory, each atomic in the
 * presence of interrupts and SMP safe.
 */

void atomic_set_8(__volatile uint8_t *, uint8_t);
void atomic_clear_8(__volatile uint8_t *, uint8_t);
void atomic_add_8(__volatile uint8_t *, uint8_t);
void atomic_subtract_8(__volatile uint8_t *, uint8_t);

void atomic_set_16(__volatile uint16_t *, uint16_t);
void atomic_clear_16(__volatile uint16_t *, uint16_t);
void atomic_add_16(__volatile uint16_t *, uint16_t);
void atomic_subtract_16(__volatile uint16_t *, uint16_t);

static __inline int atomic_cmpset_8(__volatile uint8_t *, uint8_t, uint8_t);
static __inline int atomic_fcmpset_8(__volatile uint8_t *, uint8_t *, uint8_t);
static __inline int atomic_cmpset_16(__volatile uint16_t *, uint16_t, uint16_t);
static __inline int atomic_fcmpset_16(__volatile uint16_t *, uint16_t *, uint16_t);

static __inline void
atomic_set_32(__volatile uint32_t *p, uint32_t v)
{
        uint32_t temp;

        __asm __volatile (
                "1:\tll %0, %3\n\t"             /* load old value */
                "or     %0, %2, %0\n\t"         /* calculate new value */
                "sc     %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* spin if failed */
                : "=&r" (temp), "=m" (*p)
                : "r" (v), "m" (*p)
                : "memory");
}
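
/*
 * Sketch (not part of the original header): each ll/sc loop above
 * behaves like the following C-level pseudo-code.  The sc instruction
 * writes 1 to its source register if the store succeeded and 0 if
 * another CPU wrote the location after the ll, in which case the loop
 * retries.  store_conditional() is a hypothetical stand-in for sc; the
 * real sequence must stay in assembly.
 */
#if 0
static void
example_atomic_set_32(volatile uint32_t *p, uint32_t v)
{
        uint32_t temp;

        do {
                temp = *p;              /* ll: load-linked */
                temp |= v;              /* calculate the new value */
        } while (store_conditional(p, temp) == 0);      /* sc failed: retry */
}
#endif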

static __inline void
atomic_clear_32(__volatile uint32_t *p, uint32_t v)
{
        uint32_t temp;
        v = ~v;

        __asm __volatile (
                "1:\tll %0, %3\n\t"             /* load old value */
                "and    %0, %2, %0\n\t"         /* calculate new value */
                "sc     %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* spin if failed */
                : "=&r" (temp), "=m" (*p)
                : "r" (v), "m" (*p)
                : "memory");
}

static __inline void
atomic_add_32(__volatile uint32_t *p, uint32_t v)
{
        uint32_t temp;

        __asm __volatile (
                "1:\tll %0, %3\n\t"             /* load old value */
                "addu   %0, %2, %0\n\t"         /* calculate new value */
                "sc     %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* spin if failed */
                : "=&r" (temp), "=m" (*p)
                : "r" (v), "m" (*p)
                : "memory");
}

static __inline void
atomic_subtract_32(__volatile uint32_t *p, uint32_t v)
{
        uint32_t temp;

        __asm __volatile (
                "1:\tll %0, %3\n\t"             /* load old value */
                "subu   %0, %2\n\t"             /* calculate new value */
                "sc     %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* spin if failed */
                : "=&r" (temp), "=m" (*p)
                : "r" (v), "m" (*p)
                : "memory");
}

static __inline uint32_t
atomic_readandclear_32(__volatile uint32_t *addr)
{
        uint32_t result, temp;

        __asm __volatile (
                "1:\tll  %0,%3\n\t"     /* load current value, asserting lock */
                "li      %1,0\n\t"              /* value to store */
                "sc      %1,%2\n\t"     /* attempt to store */
                "beqz    %1, 1b\n\t"            /* if the store failed, spin */
                : "=&r"(result), "=&r"(temp), "=m" (*addr)
                : "m" (*addr)
                : "memory");

        return result;
}

static __inline uint32_t
atomic_readandset_32(__volatile uint32_t *addr, uint32_t value)
{
        uint32_t result, temp;

        __asm __volatile (
                "1:\tll  %0,%3\n\t"     /* load current value, asserting lock */
                "or      %1,$0,%4\n\t"  /* value to store */
                "sc      %1,%2\n\t"     /* attempt to store */
                "beqz    %1, 1b\n\t"            /* if the store failed, spin */
                : "=&r"(result), "=&r"(temp), "=m" (*addr)
                : "m" (*addr), "r" (value)
                : "memory");

        return result;
}

#if defined(__mips_n64) || defined(__mips_n32)
static __inline void
atomic_set_64(__volatile uint64_t *p, uint64_t v)
{
        uint64_t temp;

        __asm __volatile (
                "1:\n\t"
                "lld    %0, %3\n\t"             /* load old value */
                "or     %0, %2, %0\n\t"         /* calculate new value */
                "scd    %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* spin if failed */
                : "=&r" (temp), "=m" (*p)
                : "r" (v), "m" (*p)
                : "memory");
}

static __inline void
atomic_clear_64(__volatile uint64_t *p, uint64_t v)
{
        uint64_t temp;
        v = ~v;

        __asm __volatile (
                "1:\n\t"
                "lld    %0, %3\n\t"             /* load old value */
                "and    %0, %2, %0\n\t"         /* calculate new value */
                "scd    %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* spin if failed */
                : "=&r" (temp), "=m" (*p)
                : "r" (v), "m" (*p)
                : "memory");
}

static __inline void
atomic_add_64(__volatile uint64_t *p, uint64_t v)
{
        uint64_t temp;

        __asm __volatile (
                "1:\n\t"
                "lld    %0, %3\n\t"             /* load old value */
                "daddu  %0, %2, %0\n\t"         /* calculate new value */
                "scd    %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* spin if failed */
                : "=&r" (temp), "=m" (*p)
                : "r" (v), "m" (*p)
                : "memory");
}

static __inline void
atomic_subtract_64(__volatile uint64_t *p, uint64_t v)
{
        uint64_t temp;

        __asm __volatile (
                "1:\n\t"
                "lld    %0, %3\n\t"             /* load old value */
                "dsubu  %0, %2\n\t"             /* calculate new value */
                "scd    %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* spin if failed */
                : "=&r" (temp), "=m" (*p)
                : "r" (v), "m" (*p)
                : "memory");
}

static __inline uint64_t
atomic_readandclear_64(__volatile uint64_t *addr)
{
        uint64_t result, temp;

        __asm __volatile (
                "1:\n\t"
                "lld     %0, %3\n\t"            /* load old value */
                "li      %1, 0\n\t"             /* value to store */
                "scd     %1, %2\n\t"            /* attempt to store */
                "beqz    %1, 1b\n\t"            /* if the store failed, spin */
                : "=&r"(result), "=&r"(temp), "=m" (*addr)
                : "m" (*addr)
                : "memory");

        return result;
}

static __inline uint64_t
atomic_readandset_64(__volatile uint64_t *addr, uint64_t value)
{
        uint64_t result, temp;

        __asm __volatile (
                "1:\n\t"
                "lld     %0,%3\n\t"             /* load old value */
                "or      %1,$0,%4\n\t"          /* value to store */
                "scd     %1,%2\n\t"             /* attempt to store */
                "beqz    %1, 1b\n\t"            /* if the store failed, spin */
                : "=&r"(result), "=&r"(temp), "=m" (*addr)
                : "m" (*addr), "r" (value)
                : "memory");

        return result;
}
#endif

#define ATOMIC_ACQ_REL(NAME, WIDTH)                                     \
static __inline  void                                                   \
atomic_##NAME##_acq_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{                                                                       \
        atomic_##NAME##_##WIDTH(p, v);                                  \
        mips_sync();                                                    \
}                                                                       \
                                                                        \
static __inline  void                                                   \
atomic_##NAME##_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{                                                                       \
        mips_sync();                                                    \
        atomic_##NAME##_##WIDTH(p, v);                                  \
}

/* Variants of simple arithmetic with memory barriers. */
ATOMIC_ACQ_REL(set, 8)
ATOMIC_ACQ_REL(clear, 8)
ATOMIC_ACQ_REL(add, 8)
ATOMIC_ACQ_REL(subtract, 8)
ATOMIC_ACQ_REL(set, 16)
ATOMIC_ACQ_REL(clear, 16)
ATOMIC_ACQ_REL(add, 16)
ATOMIC_ACQ_REL(subtract, 16)
ATOMIC_ACQ_REL(set, 32)
ATOMIC_ACQ_REL(clear, 32)
ATOMIC_ACQ_REL(add, 32)
ATOMIC_ACQ_REL(subtract, 32)
#if defined(__mips_n64) || defined(__mips_n32)
ATOMIC_ACQ_REL(set, 64)
ATOMIC_ACQ_REL(clear, 64)
ATOMIC_ACQ_REL(add, 64)
ATOMIC_ACQ_REL(subtract, 64)
#endif

#undef ATOMIC_ACQ_REL
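
/*
 * For reference (illustration only): ATOMIC_ACQ_REL(add, 32) above
 * expands to the following, modulo line continuations.  The acquire
 * variant runs the barrier after the operation; the release variant
 * runs it before.
 */
#if 0
static __inline  void
atomic_add_acq_32(__volatile uint32_t *p, uint32_t v)
{
        atomic_add_32(p, v);
        mips_sync();            /* acquire: fence after the operation */
}

static __inline  void
atomic_add_rel_32(__volatile uint32_t *p, uint32_t v)
{
        mips_sync();            /* release: fence before the operation */
        atomic_add_32(p, v);
}
#endif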

/*
 * We assume that a = b will do atomic loads and stores.
 */
#define ATOMIC_STORE_LOAD(WIDTH)                        \
static __inline  uint##WIDTH##_t                        \
atomic_load_acq_##WIDTH(__volatile uint##WIDTH##_t *p)  \
{                                                       \
        uint##WIDTH##_t v;                              \
                                                        \
        v = *p;                                         \
        mips_sync();                                    \
        return (v);                                     \
}                                                       \
                                                        \
static __inline  void                                   \
atomic_store_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{                                                       \
        mips_sync();                                    \
        *p = v;                                         \
}

ATOMIC_STORE_LOAD(32)
#if defined(__mips_n64) || defined(__mips_n32)
ATOMIC_STORE_LOAD(64)
#endif
#undef ATOMIC_STORE_LOAD
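
/*
 * Usage sketch (hypothetical, for illustration only): the typical use
 * of the load_acq/store_rel pair is an unlocked hand-off of data
 * behind a flag.  All names here are made up for the example.
 */
#if 0
static uint32_t example_payload;                /* hypothetical shared data */
static volatile uint32_t example_flag;          /* hypothetical flag */

static void
example_producer(uint32_t v)
{
        example_payload = v;
        /* Release: the payload store is visible before the flag store. */
        atomic_store_rel_32(&example_flag, 1);
}

static int
example_consumer(uint32_t *out)
{
        /* Acquire: later loads cannot be reordered before the flag load. */
        if (atomic_load_acq_32(&example_flag) == 0)
                return (0);
        *out = example_payload;
        return (1);
}
#endif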

/*
 * MIPS n32 is not an LP64 ABI, so atomic_load_64() is not defined for it
 * elsewhere.  Define it here, since n32 is an oddball !LP64 ABI that can
 * nonetheless do 64-bit atomics.
 */
#if defined(__mips_n32)
#define atomic_load_64  atomic_load_acq_64
#endif

/*
 * Atomically compare the value stored at *p with cmpval and if the
 * two values are equal, update the value of *p with newval. Returns
 * zero if the compare failed, nonzero otherwise.
 */
static __inline int
atomic_cmpset_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
        int ret;

        __asm __volatile (
                "1:\tll %0, %4\n\t"             /* load old value */
                "bne %0, %2, 2f\n\t"            /* compare */
                "move %0, %3\n\t"               /* value to store */
                "sc %0, %1\n\t"                 /* attempt to store */
                "beqz %0, 1b\n\t"               /* if it failed, spin */
                "j 3f\n\t"
                "2:\n\t"
                "li     %0, 0\n\t"
                "3:\n"
                : "=&r" (ret), "=m" (*p)
                : "r" (cmpval), "r" (newval), "m" (*p)
                : "memory");

        return ret;
}
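
/*
 * Usage sketch (hypothetical, for illustration only): a minimal
 * try-lock built on compare-and-set, where 0 means free and 1 means
 * held.  The _acq/_rel variants used here are generated further down
 * in this file.
 */
#if 0
static int
example_try_lock(volatile uint32_t *lock)
{
        /* Nonzero return: *lock was 0 and is now 1, lock acquired. */
        return (atomic_cmpset_acq_32(lock, 0, 1));
}

static void
example_unlock(volatile uint32_t *lock)
{
        atomic_store_rel_32(lock, 0);
}
#endif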

/*
 * Atomically compare the value stored at *p with the value pointed to
 * by cmpval and, if they are equal, update *p with newval.  If they
 * differ, the current value of *p is written back to *cmpval.  Returns
 * zero if the compare failed, nonzero otherwise.
 */
static __inline int
atomic_fcmpset_32(__volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
{
        int ret;

        /*
         * The following sequence (similar to that in atomic_fcmpset_64) will
         * attempt to update the value of *p with newval if the comparison
         * succeeds.  Note that it will exit regardless of whether the store
         * actually succeeded, leaving *cmpval untouched.  This is in line with
         * the documentation of atomic_fcmpset_<type>() in atomic(9) for ll/sc
         * architectures.
         */
        __asm __volatile (
                "ll     %0, %1\n\t"             /* load old value */
                "bne    %0, %4, 1f\n\t"         /* compare */
                "move   %0, %3\n\t"             /* value to store */
                "sc     %0, %1\n\t"             /* attempt to store */
                "j      2f\n\t"                 /* exit regardless of success */
                "nop\n\t"                       /* avoid delay slot accident */
                "1:\n\t"
                "sw     %0, %2\n\t"             /* save old value */
                "li     %0, 0\n\t"
                "2:\n"
                : "=&r" (ret), "+m" (*p), "=m" (*cmpval)
                : "r" (newval), "r" (*cmpval)
                : "memory");
        return ret;
}
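
/*
 * Usage sketch (hypothetical, for illustration only): unlike cmpset,
 * fcmpset refreshes the expected value on a failed comparison, so a
 * retry loop does not need to reload *p by hand.  A made-up
 * atomic-maximum update:
 */
#if 0
static void
example_atomic_max_32(volatile uint32_t *p, uint32_t v)
{
        uint32_t old;

        old = *p;
        do {
                if (old >= v)
                        return;         /* *p is already at least v */
        } while (atomic_fcmpset_32(p, &old, v) == 0);   /* old is refreshed */
}
#endif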

#define ATOMIC_CMPSET_ACQ_REL(WIDTH)                                    \
static __inline  int                                                    \
atomic_cmpset_acq_##WIDTH(__volatile uint##WIDTH##_t *p,                \
    uint##WIDTH##_t cmpval, uint##WIDTH##_t newval)                     \
{                                                                       \
        int retval;                                                     \
                                                                        \
        retval = atomic_cmpset_##WIDTH(p, cmpval, newval);              \
        mips_sync();                                                    \
        return (retval);                                                \
}                                                                       \
                                                                        \
static __inline  int                                                    \
atomic_cmpset_rel_##WIDTH(__volatile uint##WIDTH##_t *p,                \
    uint##WIDTH##_t cmpval, uint##WIDTH##_t newval)                     \
{                                                                       \
        mips_sync();                                                    \
        return (atomic_cmpset_##WIDTH(p, cmpval, newval));              \
}

#define ATOMIC_FCMPSET_ACQ_REL(WIDTH)                                   \
static __inline  int                                                    \
atomic_fcmpset_acq_##WIDTH(__volatile uint##WIDTH##_t *p,               \
    uint##WIDTH##_t *cmpval, uint##WIDTH##_t newval)                    \
{                                                                       \
        int retval;                                                     \
                                                                        \
        retval = atomic_fcmpset_##WIDTH(p, cmpval, newval);             \
        mips_sync();                                                    \
        return (retval);                                                \
}                                                                       \
                                                                        \
static __inline  int                                                    \
atomic_fcmpset_rel_##WIDTH(__volatile uint##WIDTH##_t *p,               \
    uint##WIDTH##_t *cmpval, uint##WIDTH##_t newval)                    \
{                                                                       \
        mips_sync();                                                    \
        return (atomic_fcmpset_##WIDTH(p, cmpval, newval));             \
}

/*
 * Acquire and release variants of the compare-and-set operations
 * above, for the 8-, 16- and 32-bit widths.
 */
ATOMIC_CMPSET_ACQ_REL(8);
ATOMIC_CMPSET_ACQ_REL(16);
ATOMIC_CMPSET_ACQ_REL(32);
ATOMIC_FCMPSET_ACQ_REL(8);
ATOMIC_FCMPSET_ACQ_REL(16);
ATOMIC_FCMPSET_ACQ_REL(32);

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline uint32_t
atomic_fetchadd_32(__volatile uint32_t *p, uint32_t v)
{
        uint32_t value, temp;

        __asm __volatile (
                "1:\tll %0, %1\n\t"             /* load old value */
                "addu %2, %3, %0\n\t"           /* calculate new value */
                "sc %2, %1\n\t"                 /* attempt to store */
                "beqz %2, 1b\n\t"               /* spin if failed */
                : "=&r" (value), "=m" (*p), "=&r" (temp)
                : "r" (v), "m" (*p));
        return (value);
}
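
/*
 * Usage sketch (hypothetical, for illustration only): fetchadd returns
 * the value *p held before the addition, which makes it a natural
 * building block for a made-up unique-id or ticket counter.
 */
#if 0
static volatile uint32_t example_next_id;       /* hypothetical counter */

static uint32_t
example_alloc_id(void)
{
        /* Concurrent callers each receive a distinct value. */
        return (atomic_fetchadd_32(&example_next_id, 1));
}
#endif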

#if defined(__mips_n64) || defined(__mips_n32)
/*
 * Atomically compare the value stored at *p with cmpval and if the
 * two values are equal, update the value of *p with newval. Returns
 * zero if the compare failed, nonzero otherwise.
 */
static __inline int
atomic_cmpset_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
        int ret;

        __asm __volatile (
                "1:\n\t"
                "lld    %0, %4\n\t"             /* load old value */
                "bne    %0, %2, 2f\n\t"         /* compare */
                "move   %0, %3\n\t"             /* value to store */
                "scd    %0, %1\n\t"             /* attempt to store */
                "beqz   %0, 1b\n\t"             /* if it failed, spin */
                "j      3f\n\t"
                "2:\n\t"
                "li     %0, 0\n\t"
                "3:\n"
                : "=&r" (ret), "=m" (*p)
                : "r" (cmpval), "r" (newval), "m" (*p)
                : "memory");

        return ret;
}

static __inline int
atomic_fcmpset_64(__volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
{
        int ret;

        __asm __volatile (
                "lld    %0, %1\n\t"             /* load old value */
                "bne    %0, %4, 1f\n\t"         /* compare */
                "move   %0, %3\n\t"             /* value to store */
                "scd    %0, %1\n\t"             /* attempt to store */
                "j      2f\n\t"                 /* exit regardless of success */
                "nop\n\t"                       /* avoid delay slot accident */
                "1:\n\t"
                "sd     %0, %2\n\t"             /* save old value */
                "li     %0, 0\n\t"
                "2:\n"
                : "=&r" (ret), "+m" (*p), "=m" (*cmpval)
                : "r" (newval), "r" (*cmpval)
                : "memory");

        return ret;
}

/*
 * Acquire and release variants of the 64-bit compare-and-set
 * operations above.
 */
ATOMIC_CMPSET_ACQ_REL(64);
ATOMIC_FCMPSET_ACQ_REL(64);

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline uint64_t
atomic_fetchadd_64(__volatile uint64_t *p, uint64_t v)
{
        uint64_t value, temp;

        __asm __volatile (
                "1:\n\t"
                "lld    %0, %1\n\t"             /* load old value */
                "daddu  %2, %3, %0\n\t"         /* calculate new value */
                "scd    %2, %1\n\t"             /* attempt to store */
                "beqz   %2, 1b\n\t"             /* spin if failed */
                : "=&r" (value), "=m" (*p), "=&r" (temp)
                : "r" (v), "m" (*p));
        return (value);
}
#endif

static __inline void
atomic_thread_fence_acq(void)
{

        mips_sync();
}

static __inline void
atomic_thread_fence_rel(void)
{

        mips_sync();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

        mips_sync();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

        mips_sync();
}
/* Operations on chars. */
#define atomic_set_char         atomic_set_8
#define atomic_set_acq_char     atomic_set_acq_8
#define atomic_set_rel_char     atomic_set_rel_8
#define atomic_clear_char       atomic_clear_8
#define atomic_clear_acq_char   atomic_clear_acq_8
#define atomic_clear_rel_char   atomic_clear_rel_8
#define atomic_add_char         atomic_add_8
#define atomic_add_acq_char     atomic_add_acq_8
#define atomic_add_rel_char     atomic_add_rel_8
#define atomic_subtract_char    atomic_subtract_8
#define atomic_subtract_acq_char        atomic_subtract_acq_8
#define atomic_subtract_rel_char        atomic_subtract_rel_8
#define atomic_cmpset_char      atomic_cmpset_8
#define atomic_cmpset_acq_char  atomic_cmpset_acq_8
#define atomic_cmpset_rel_char  atomic_cmpset_rel_8
#define atomic_fcmpset_char     atomic_fcmpset_8
#define atomic_fcmpset_acq_char atomic_fcmpset_acq_8
#define atomic_fcmpset_rel_char atomic_fcmpset_rel_8

/* Operations on shorts. */
#define atomic_set_short        atomic_set_16
#define atomic_set_acq_short    atomic_set_acq_16
#define atomic_set_rel_short    atomic_set_rel_16
#define atomic_clear_short      atomic_clear_16
#define atomic_clear_acq_short  atomic_clear_acq_16
#define atomic_clear_rel_short  atomic_clear_rel_16
#define atomic_add_short        atomic_add_16
#define atomic_add_acq_short    atomic_add_acq_16
#define atomic_add_rel_short    atomic_add_rel_16
#define atomic_subtract_short   atomic_subtract_16
#define atomic_subtract_acq_short       atomic_subtract_acq_16
#define atomic_subtract_rel_short       atomic_subtract_rel_16
#define atomic_cmpset_short     atomic_cmpset_16
#define atomic_cmpset_acq_short atomic_cmpset_acq_16
#define atomic_cmpset_rel_short atomic_cmpset_rel_16
#define atomic_fcmpset_short    atomic_fcmpset_16
#define atomic_fcmpset_acq_short        atomic_fcmpset_acq_16
#define atomic_fcmpset_rel_short        atomic_fcmpset_rel_16

/* Operations on ints. */
#define atomic_set_int          atomic_set_32
#define atomic_set_acq_int      atomic_set_acq_32
#define atomic_set_rel_int      atomic_set_rel_32
#define atomic_clear_int        atomic_clear_32
#define atomic_clear_acq_int    atomic_clear_acq_32
#define atomic_clear_rel_int    atomic_clear_rel_32
#define atomic_add_int          atomic_add_32
#define atomic_add_acq_int      atomic_add_acq_32
#define atomic_add_rel_int      atomic_add_rel_32
#define atomic_subtract_int     atomic_subtract_32
#define atomic_subtract_acq_int atomic_subtract_acq_32
#define atomic_subtract_rel_int atomic_subtract_rel_32
#define atomic_cmpset_int       atomic_cmpset_32
#define atomic_cmpset_acq_int   atomic_cmpset_acq_32
#define atomic_cmpset_rel_int   atomic_cmpset_rel_32
#define atomic_fcmpset_int      atomic_fcmpset_32
#define atomic_fcmpset_acq_int  atomic_fcmpset_acq_32
#define atomic_fcmpset_rel_int  atomic_fcmpset_rel_32
#define atomic_load_acq_int     atomic_load_acq_32
#define atomic_store_rel_int    atomic_store_rel_32
#define atomic_readandclear_int atomic_readandclear_32
#define atomic_readandset_int   atomic_readandset_32
#define atomic_fetchadd_int     atomic_fetchadd_32

/*
 * I think the following is right, even for n32.  For n32 the pointers
 * are still 32 bits, so we need to operate on them as 32-bit
 * quantities, even though they are sign extended in operation.  For
 * longs there is no question, because under the 32-bit ABIs they are
 * always 32 bits.
 */
#ifdef __mips_n64
/* Operations on longs. */
#define atomic_set_long         atomic_set_64
#define atomic_set_acq_long     atomic_set_acq_64
#define atomic_set_rel_long     atomic_set_rel_64
#define atomic_clear_long       atomic_clear_64
#define atomic_clear_acq_long   atomic_clear_acq_64
#define atomic_clear_rel_long   atomic_clear_rel_64
#define atomic_add_long         atomic_add_64
#define atomic_add_acq_long     atomic_add_acq_64
#define atomic_add_rel_long     atomic_add_rel_64
#define atomic_subtract_long    atomic_subtract_64
#define atomic_subtract_acq_long        atomic_subtract_acq_64
#define atomic_subtract_rel_long        atomic_subtract_rel_64
#define atomic_cmpset_long      atomic_cmpset_64
#define atomic_cmpset_acq_long  atomic_cmpset_acq_64
#define atomic_cmpset_rel_long  atomic_cmpset_rel_64
#define atomic_fcmpset_long     atomic_fcmpset_64
#define atomic_fcmpset_acq_long atomic_fcmpset_acq_64
#define atomic_fcmpset_rel_long atomic_fcmpset_rel_64
#define atomic_load_acq_long    atomic_load_acq_64
#define atomic_store_rel_long   atomic_store_rel_64
#define atomic_fetchadd_long    atomic_fetchadd_64
#define atomic_readandclear_long        atomic_readandclear_64

#else /* !__mips_n64 */

/* Operations on longs. */
#define atomic_set_long(p, v)                                           \
        atomic_set_32((volatile u_int *)(p), (u_int)(v))
#define atomic_set_acq_long(p, v)                                       \
        atomic_set_acq_32((volatile u_int *)(p), (u_int)(v))
#define atomic_set_rel_long(p, v)                                       \
        atomic_set_rel_32((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_long(p, v)                                         \
        atomic_clear_32((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_acq_long(p, v)                                     \
        atomic_clear_acq_32((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_rel_long(p, v)                                     \
        atomic_clear_rel_32((volatile u_int *)(p), (u_int)(v))
#define atomic_add_long(p, v)                                           \
        atomic_add_32((volatile u_int *)(p), (u_int)(v))
#define atomic_add_acq_long(p, v)                                       \
        atomic_add_acq_32((volatile u_int *)(p), (u_int)(v))
#define atomic_add_rel_long(p, v)                                       \
        atomic_add_rel_32((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_long(p, v)                                      \
        atomic_subtract_32((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_acq_long(p, v)                                  \
        atomic_subtract_acq_32((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_rel_long(p, v)                                  \
        atomic_subtract_rel_32((volatile u_int *)(p), (u_int)(v))
#define atomic_cmpset_long(p, cmpval, newval)                           \
        atomic_cmpset_32((volatile u_int *)(p), (u_int)(cmpval),        \
            (u_int)(newval))
#define atomic_cmpset_acq_long(p, cmpval, newval)                       \
        atomic_cmpset_acq_32((volatile u_int *)(p), (u_int)(cmpval),    \
            (u_int)(newval))
#define atomic_cmpset_rel_long(p, cmpval, newval)                       \
        atomic_cmpset_rel_32((volatile u_int *)(p), (u_int)(cmpval),    \
            (u_int)(newval))
#define atomic_fcmpset_long(p, cmpval, newval)                          \
        atomic_fcmpset_32((volatile u_int *)(p), (u_int *)(cmpval),     \
            (u_int)(newval))
#define atomic_fcmpset_acq_long(p, cmpval, newval)                      \
        atomic_fcmpset_acq_32((volatile u_int *)(p), (u_int *)(cmpval), \
            (u_int)(newval))
#define atomic_fcmpset_rel_long(p, cmpval, newval)                      \
        atomic_fcmpset_rel_32((volatile u_int *)(p), (u_int *)(cmpval), \
            (u_int)(newval))
#define atomic_load_acq_long(p)                                         \
        (u_long)atomic_load_acq_32((volatile u_int *)(p))
#define atomic_store_rel_long(p, v)                                     \
        atomic_store_rel_32((volatile u_int *)(p), (u_int)(v))
#define atomic_fetchadd_long(p, v)                                      \
        atomic_fetchadd_32((volatile u_int *)(p), (u_int)(v))
#define atomic_readandclear_long(p)                                     \
        atomic_readandclear_32((volatile u_int *)(p))

#endif /* __mips_n64 */

/* Operations on pointers. */
#define atomic_set_ptr          atomic_set_long
#define atomic_set_acq_ptr      atomic_set_acq_long
#define atomic_set_rel_ptr      atomic_set_rel_long
#define atomic_clear_ptr        atomic_clear_long
#define atomic_clear_acq_ptr    atomic_clear_acq_long
#define atomic_clear_rel_ptr    atomic_clear_rel_long
#define atomic_add_ptr          atomic_add_long
#define atomic_add_acq_ptr      atomic_add_acq_long
#define atomic_add_rel_ptr      atomic_add_rel_long
#define atomic_subtract_ptr     atomic_subtract_long
#define atomic_subtract_acq_ptr atomic_subtract_acq_long
#define atomic_subtract_rel_ptr atomic_subtract_rel_long
#define atomic_cmpset_ptr       atomic_cmpset_long
#define atomic_cmpset_acq_ptr   atomic_cmpset_acq_long
#define atomic_cmpset_rel_ptr   atomic_cmpset_rel_long
#define atomic_fcmpset_ptr      atomic_fcmpset_long
#define atomic_fcmpset_acq_ptr  atomic_fcmpset_acq_long
#define atomic_fcmpset_rel_ptr  atomic_fcmpset_rel_long
#define atomic_load_acq_ptr     atomic_load_acq_long
#define atomic_store_rel_ptr    atomic_store_rel_long
#define atomic_readandclear_ptr atomic_readandclear_long

static __inline unsigned int
atomic_swap_int(volatile unsigned int *ptr, const unsigned int value)
{
        unsigned int retval;

        retval = *ptr;

        while (!atomic_fcmpset_int(ptr, &retval, value))
                ;
        return (retval);
}

static __inline uint32_t
atomic_swap_32(volatile uint32_t *ptr, const uint32_t value)
{
        uint32_t retval;

        retval = *ptr;

        while (!atomic_fcmpset_32(ptr, &retval, value))
                ;
        return (retval);
}
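
/*
 * Usage sketch (hypothetical, for illustration only): swap gives a
 * simple test-and-set spin lock, where 0 means free and 1 means held.
 * The barriers are added by hand here, since atomic_swap_32() itself
 * has no acquire or release semantics.
 */
#if 0
static void
example_spin_lock(volatile uint32_t *lock)
{
        while (atomic_swap_32(lock, 1) != 0)
                ;               /* spin until the old value was 0 */
        mips_sync();            /* acquire semantics after taking the lock */
}

static void
example_spin_unlock(volatile uint32_t *lock)
{
        mips_sync();            /* release semantics before dropping the lock */
        *lock = 0;
}
#endif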

#if defined(__mips_n64) || defined(__mips_n32)
static __inline uint64_t
atomic_swap_64(volatile uint64_t *ptr, const uint64_t value)
{
        uint64_t retval;

        retval = *ptr;

        while (!atomic_fcmpset_64(ptr, &retval, value))
                ;
        return (retval);
}
#endif

#ifdef __mips_n64
static __inline unsigned long
atomic_swap_long(volatile unsigned long *ptr, const unsigned long value)
{
        unsigned long retval;

        retval = *ptr;

        while (!atomic_fcmpset_64((volatile uint64_t *)ptr,
            (uint64_t *)&retval, value))
                ;
        return (retval);
}
#else
static __inline unsigned long
atomic_swap_long(volatile unsigned long *ptr, const unsigned long value)
{
        unsigned long retval;

        retval = *ptr;

        while (!atomic_fcmpset_32((volatile uint32_t *)ptr,
            (uint32_t *)&retval, value))
                ;
        return (retval);
}
#endif
#define atomic_swap_ptr(ptr, value) atomic_swap_long((unsigned long *)(ptr), value)

#include <sys/_atomic_subword.h>

#endif /* ! _MACHINE_ATOMIC_H_ */
