sys/amd64/include/atomic.h

/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

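/*
 * Memory fences.  mb() is a full barrier (MFENCE), wmb() orders stores
 * against each other (SFENCE), and rmb() orders loads (LFENCE).  The
 * "memory" clobber additionally keeps the compiler from moving memory
 * accesses across the fence.
 */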
#define mb()    __asm __volatile("mfence;" : : : "memory")
#define wmb()   __asm __volatile("sfence;" : : : "memory")
#define rmb()   __asm __volatile("lfence;" : : : "memory")

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)        (*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)      (*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)        (*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)   (*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)       (*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)     (*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)       (*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)  (*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)         (*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)       (*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)         (*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)    (*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)        (return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)   (return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)        (*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)      (*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)        (*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)   (*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)       (return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)  (return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
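
/*
 * Example (illustration only; "flags" and FLAG_DIRTY are hypothetical
 * names, not part of this header): because each operation compiles to
 * a single locked instruction, a flags word shared with an interrupt
 * handler or another CPU can be updated without taking a lock:
 *
 *      static volatile u_int flags;
 *
 *      atomic_set_int(&flags, FLAG_DIRTY);     set a bit atomically
 *      atomic_clear_int(&flags, FLAG_DIRTY);   clear it again
 *      atomic_add_int(&flags, 1);              read-modify-write, atomic
 */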

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)                     \
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);  \
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int     atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int     atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src);
u_int   atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long  atomic_fetchadd_long(volatile u_long *p, u_long v);
int     atomic_testandset_int(volatile u_int *p, u_int v);
int     atomic_testandset_long(volatile u_long *p, u_int v);

#define ATOMIC_LOAD(TYPE, LOP)                                  \
u_##TYPE        atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define ATOMIC_STORE(TYPE)                                      \
void            atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED        "lock ; "
#else
#define MPLOCKED
#endif

/*
 * The asm statements are marked volatile so that the compiler cannot
 * optimize them away.  GCC aggressively reorders memory operations, so
 * the barrier variants also list "memory" in the clobber list to keep
 * other accesses from being moved across them.
 */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)             \
static __inline void                                    \
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(MPLOCKED OP                    \
        : "=m" (*p)                                     \
        : CONS (V), "m" (*p)                            \
        : "cc");                                        \
}                                                       \
                                                        \
static __inline void                                    \
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(MPLOCKED OP                    \
        : "=m" (*p)                                     \
        : CONS (V), "m" (*p)                            \
        : "memory", "cc");                              \
}                                                       \
struct __hack

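/*
 * For illustration, in an SMP kernel
 * ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) expands roughly to:
 *
 *      static __inline void
 *      atomic_add_int(volatile u_int *p, u_int v)
 *      {
 *              __asm __volatile("lock ; addl %1,%0"
 *              : "=m" (*p)
 *              : "ir" (v), "m" (*p)
 *              : "cc");
 *      }
 *
 * plus an atomic_add_barr_int variant that additionally clobbers
 * "memory".  The trailing "struct __hack" exists only so that each
 * macro use can end with a semicolon.
 */
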
/*
 * Atomic compare and set, used by the mutex functions.
 *
 * Atomically performs: if (*dst == expect) *dst = src
 * (32-bit operands for the _int variant, 64-bit for _long).
 *
 * Returns 0 on failure, non-zero on success.
 */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       cmpxchgl %2,%1 ;        "
        "       sete    %0 ;            "
        "1:                             "
        "# atomic_cmpset_int"
        : "=a" (res),                   /* 0 */
          "=m" (*dst)                   /* 1 */
        : "r" (src),                    /* 2 */
          "a" (expect),                 /* 3 */
          "m" (*dst)                    /* 4 */
        : "memory", "cc");

        return (res);
}

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       cmpxchgq %2,%1 ;        "
        "       sete    %0 ;            "
        "1:                             "
        "# atomic_cmpset_long"
        : "=a" (res),                   /* 0 */
          "=m" (*dst)                   /* 1 */
        : "r" (src),                    /* 2 */
          "a" (expect),                 /* 3 */
          "m" (*dst)                    /* 4 */
        : "memory", "cc");

        return (res);
}
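
/*
 * Example (a minimal sketch; "lock_word" is a hypothetical variable):
 * cmpset is the building block for mutual exclusion.  A trivial
 * spinlock looks like:
 *
 *      static volatile u_int lock_word;        0 = free, 1 = held
 *
 *      while (atomic_cmpset_int(&lock_word, 0, 1) == 0)
 *              ;                               spin until acquired
 *      ... critical section ...
 *      atomic_store_rel_int(&lock_word, 0);    release
 *
 * The "memory" clobber above gives the successful cmpset acquire
 * semantics, and the release store publishes the critical section's
 * writes before the lock is seen as free.
 */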

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

        __asm __volatile(
        "       " MPLOCKED "            "
        "       xaddl   %0, %1 ;        "
        "# atomic_fetchadd_int"
        : "+r" (v),                     /* 0 (result) */
          "=m" (*p)                     /* 1 */
        : "m" (*p)                      /* 2 */
        : "cc");
        return (v);
}

/*
 * Atomically add the value of v to the long integer pointed to by p and
 * return the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

        __asm __volatile(
        "       " MPLOCKED "            "
        "       xaddq   %0, %1 ;        "
        "# atomic_fetchadd_long"
        : "+r" (v),                     /* 0 (result) */
          "=m" (*p)                     /* 1 */
        : "m" (*p)                      /* 2 */
        : "cc");
        return (v);
}
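
/*
 * Example (illustrative only; "next_slot" is hypothetical): because
 * fetchadd returns the old value, it can hand out unique tickets or
 * array slots without a lock:
 *
 *      static volatile u_int next_slot;
 *
 *      u_int my_slot = atomic_fetchadd_int(&next_slot, 1);
 *
 * Every caller gets a distinct my_slot even when many CPUs race on
 * next_slot.
 */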

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       btsl    %2,%1 ;         "
        "       setc    %0 ;            "
        "# atomic_testandset_int"
        : "=q" (res),                   /* 0 */
          "+m" (*p)                     /* 1 */
        : "Ir" (v & 0x1f)               /* 2 */
        : "cc");
        return (res);
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       btsq    %2,%1 ;         "
        "       setc    %0 ;            "
        "# atomic_testandset_long"
        : "=q" (res),                   /* 0 */
          "+m" (*p)                     /* 1 */
        : "Jr" ((u_long)(v & 0x3f))     /* 2 */
        : "cc");
        return (res);
}
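
/*
 * Example (a sketch; "bitmap" is a hypothetical variable): testandset
 * atomically sets bit v and reports whether it was already set, so it
 * can claim an entry from a small bitmap.  Note that the bit index is
 * masked to the operand width (0x1f or 0x3f above).
 *
 *      static volatile u_long bitmap;          one bit per resource
 *
 *      if (atomic_testandset_long(&bitmap, n) == 0) {
 *              ... bit n was clear; we now own resource n ...
 *      }
 */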

/*
 * We assume that plain, aligned loads and stores (a = b) are atomic.
 * Under the x86 memory model a simple store already has release
 * semantics: it is not reordered with earlier loads or stores.
 *
 * Loads, however, may be reordered ahead of older stores to other
 * locations, so atomic_load_acq needs a Store/Load barrier in SMP
 * kernels.  We use "lock cmpxchg" rather than mfence, as recommended
 * by the AMD Software Optimization Guide.  On UP kernels the cache of
 * the single processor is always consistent, so we only need to take
 * care of the compiler.
 */
#define ATOMIC_STORE(TYPE)                              \
static __inline void                                    \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __compiler_membar();                            \
        *p = v;                                         \
}                                                       \
struct __hack

#if defined(_KERNEL) && !defined(SMP)

#define ATOMIC_LOAD(TYPE, LOP)                          \
static __inline u_##TYPE                                \
atomic_load_acq_##TYPE(volatile u_##TYPE *p)            \
{                                                       \
        u_##TYPE tmp;                                   \
                                                        \
        tmp = *p;                                       \
        __compiler_membar();                            \
        return (tmp);                                   \
}                                                       \
struct __hack

#else /* !(_KERNEL && !SMP) */

#define ATOMIC_LOAD(TYPE, LOP)                          \
static __inline u_##TYPE                                \
atomic_load_acq_##TYPE(volatile u_##TYPE *p)            \
{                                                       \
        u_##TYPE res;                                   \
                                                        \
        __asm __volatile(MPLOCKED LOP                   \
        : "=a" (res),                   /* 0 */         \
          "=m" (*p)                     /* 1 */         \
        : "m" (*p)                      /* 2 */         \
        : "memory", "cc");                              \
                                                        \
        return (res);                                   \
}                                                       \
struct __hack

#endif /* _KERNEL && !SMP */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */
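
/*
 * Example (a minimal sketch; "data" and "ready" are hypothetical):
 * store_rel and load_acq pair up to publish data between CPUs:
 *
 *      producer:
 *              data = 42;                              plain store
 *              atomic_store_rel_int(&ready, 1);        publish
 *
 *      consumer:
 *              if (atomic_load_acq_int(&ready) != 0)
 *                      use(data);                      sees 42
 *
 * The release store keeps the write of data from sinking below the
 * store to ready; the acquire load keeps the read of data from rising
 * above the load of ready.
 */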

ATOMIC_ASM(set,      char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,      char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,      short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,      short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,      int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,      int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,      long,  "orq %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "ir", ~v);
ATOMIC_ASM(add,      long,  "addq %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "ir",  v);

ATOMIC_LOAD(char,  "cmpxchgb %b0,%1");
ATOMIC_LOAD(short, "cmpxchgw %w0,%1");
ATOMIC_LOAD(int,   "cmpxchgl %0,%1");
ATOMIC_LOAD(long,  "cmpxchgq %0,%1");

ATOMIC_STORE(char);
ATOMIC_STORE(short);
ATOMIC_STORE(int);
ATOMIC_STORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE

#ifndef WANT_FUNCTIONS

/* Read the current value and store a zero in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
        u_int res;

        res = 0;
        __asm __volatile(
        "       xchgl   %1,%0 ;         "
        "# atomic_readandclear_int"
        : "+r" (res),                   /* 0 */
          "=m" (*addr)                  /* 1 */
        : "m" (*addr));

        return (res);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
        u_long res;

        res = 0;
        __asm __volatile(
        "       xchgq   %1,%0 ;         "
        "# atomic_readandclear_long"
        : "+r" (res),                   /* 0 */
          "=m" (*addr)                  /* 1 */
        : "m" (*addr));

        return (res);
}

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

        __asm __volatile(
        "       xchgl   %1,%0 ;         "
        "# atomic_swap_int"
        : "+r" (v),                     /* 0 */
          "+m" (*p));                   /* 1 */
        return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

        __asm __volatile(
        "       xchgq   %1,%0 ;         "
        "# atomic_swap_long"
        : "+r" (v),                     /* 0 */
          "+m" (*p));                   /* 1 */
        return (v);
}

#else /* !__GNUCLIKE_ASM */

u_int   atomic_readandclear_int(volatile u_int *addr);
u_long  atomic_readandclear_long(volatile u_long *addr);
u_int   atomic_swap_int(volatile u_int *p, u_int v);
u_long  atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */
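
/*
 * Note: the functions above omit MPLOCKED because xchg with a memory
 * operand is implicitly locked on x86.
 *
 * Example (illustrative; "pending" is a hypothetical variable):
 * readandclear is a natural fit for draining a mask of posted events
 * in a single atomic step:
 *
 *      static volatile u_int pending;
 *
 *      u_int events = atomic_readandclear_int(&pending);
 *      while (events != 0)
 *              ... handle and strip one event bit from events ...
 */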

#define atomic_set_acq_char             atomic_set_barr_char
#define atomic_set_rel_char             atomic_set_barr_char
#define atomic_clear_acq_char           atomic_clear_barr_char
#define atomic_clear_rel_char           atomic_clear_barr_char
#define atomic_add_acq_char             atomic_add_barr_char
#define atomic_add_rel_char             atomic_add_barr_char
#define atomic_subtract_acq_char        atomic_subtract_barr_char
#define atomic_subtract_rel_char        atomic_subtract_barr_char

#define atomic_set_acq_short            atomic_set_barr_short
#define atomic_set_rel_short            atomic_set_barr_short
#define atomic_clear_acq_short          atomic_clear_barr_short
#define atomic_clear_rel_short          atomic_clear_barr_short
#define atomic_add_acq_short            atomic_add_barr_short
#define atomic_add_rel_short            atomic_add_barr_short
#define atomic_subtract_acq_short       atomic_subtract_barr_short
#define atomic_subtract_rel_short       atomic_subtract_barr_short

#define atomic_set_acq_int              atomic_set_barr_int
#define atomic_set_rel_int              atomic_set_barr_int
#define atomic_clear_acq_int            atomic_clear_barr_int
#define atomic_clear_rel_int            atomic_clear_barr_int
#define atomic_add_acq_int              atomic_add_barr_int
#define atomic_add_rel_int              atomic_add_barr_int
#define atomic_subtract_acq_int         atomic_subtract_barr_int
#define atomic_subtract_rel_int         atomic_subtract_barr_int
#define atomic_cmpset_acq_int           atomic_cmpset_int
#define atomic_cmpset_rel_int           atomic_cmpset_int

#define atomic_set_acq_long             atomic_set_barr_long
#define atomic_set_rel_long             atomic_set_barr_long
#define atomic_clear_acq_long           atomic_clear_barr_long
#define atomic_clear_rel_long           atomic_clear_barr_long
#define atomic_add_acq_long             atomic_add_barr_long
#define atomic_add_rel_long             atomic_add_barr_long
#define atomic_subtract_acq_long        atomic_subtract_barr_long
#define atomic_subtract_rel_long        atomic_subtract_barr_long
#define atomic_cmpset_acq_long          atomic_cmpset_long
#define atomic_cmpset_rel_long          atomic_cmpset_long
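
/*
 * On amd64 a locked read-modify-write instruction is a full memory
 * barrier, so the acquire and release variants above can all alias the
 * single _barr implementation; cmpset already clobbers "memory" and
 * serves for both orderings.
 */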

/* Operations on 8-bit bytes. */
#define atomic_set_8            atomic_set_char
#define atomic_set_acq_8        atomic_set_acq_char
#define atomic_set_rel_8        atomic_set_rel_char
#define atomic_clear_8          atomic_clear_char
#define atomic_clear_acq_8      atomic_clear_acq_char
#define atomic_clear_rel_8      atomic_clear_rel_char
#define atomic_add_8            atomic_add_char
#define atomic_add_acq_8        atomic_add_acq_char
#define atomic_add_rel_8        atomic_add_rel_char
#define atomic_subtract_8       atomic_subtract_char
#define atomic_subtract_acq_8   atomic_subtract_acq_char
#define atomic_subtract_rel_8   atomic_subtract_rel_char
#define atomic_load_acq_8       atomic_load_acq_char
#define atomic_store_rel_8      atomic_store_rel_char

/* Operations on 16-bit words. */
#define atomic_set_16           atomic_set_short
#define atomic_set_acq_16       atomic_set_acq_short
#define atomic_set_rel_16       atomic_set_rel_short
#define atomic_clear_16         atomic_clear_short
#define atomic_clear_acq_16     atomic_clear_acq_short
#define atomic_clear_rel_16     atomic_clear_rel_short
#define atomic_add_16           atomic_add_short
#define atomic_add_acq_16       atomic_add_acq_short
#define atomic_add_rel_16       atomic_add_rel_short
#define atomic_subtract_16      atomic_subtract_short
#define atomic_subtract_acq_16  atomic_subtract_acq_short
#define atomic_subtract_rel_16  atomic_subtract_rel_short
#define atomic_load_acq_16      atomic_load_acq_short
#define atomic_store_rel_16     atomic_store_rel_short

/* Operations on 32-bit double words. */
#define atomic_set_32           atomic_set_int
#define atomic_set_acq_32       atomic_set_acq_int
#define atomic_set_rel_32       atomic_set_rel_int
#define atomic_clear_32         atomic_clear_int
#define atomic_clear_acq_32     atomic_clear_acq_int
#define atomic_clear_rel_32     atomic_clear_rel_int
#define atomic_add_32           atomic_add_int
#define atomic_add_acq_32       atomic_add_acq_int
#define atomic_add_rel_32       atomic_add_rel_int
#define atomic_subtract_32      atomic_subtract_int
#define atomic_subtract_acq_32  atomic_subtract_acq_int
#define atomic_subtract_rel_32  atomic_subtract_rel_int
#define atomic_load_acq_32      atomic_load_acq_int
#define atomic_store_rel_32     atomic_store_rel_int
#define atomic_cmpset_32        atomic_cmpset_int
#define atomic_cmpset_acq_32    atomic_cmpset_acq_int
#define atomic_cmpset_rel_32    atomic_cmpset_rel_int
#define atomic_swap_32          atomic_swap_int
#define atomic_readandclear_32  atomic_readandclear_int
#define atomic_fetchadd_32      atomic_fetchadd_int
#define atomic_testandset_32    atomic_testandset_int

/* Operations on 64-bit quad words. */
#define atomic_set_64           atomic_set_long
#define atomic_set_acq_64       atomic_set_acq_long
#define atomic_set_rel_64       atomic_set_rel_long
#define atomic_clear_64         atomic_clear_long
#define atomic_clear_acq_64     atomic_clear_acq_long
#define atomic_clear_rel_64     atomic_clear_rel_long
#define atomic_add_64           atomic_add_long
#define atomic_add_acq_64       atomic_add_acq_long
#define atomic_add_rel_64       atomic_add_rel_long
#define atomic_subtract_64      atomic_subtract_long
#define atomic_subtract_acq_64  atomic_subtract_acq_long
#define atomic_subtract_rel_64  atomic_subtract_rel_long
#define atomic_load_acq_64      atomic_load_acq_long
#define atomic_store_rel_64     atomic_store_rel_long
#define atomic_cmpset_64        atomic_cmpset_long
#define atomic_cmpset_acq_64    atomic_cmpset_acq_long
#define atomic_cmpset_rel_64    atomic_cmpset_rel_long
#define atomic_swap_64          atomic_swap_long
#define atomic_readandclear_64  atomic_readandclear_long
#define atomic_fetchadd_64      atomic_fetchadd_long
#define atomic_testandset_64    atomic_testandset_long

/* Operations on pointers. */
#define atomic_set_ptr          atomic_set_long
#define atomic_set_acq_ptr      atomic_set_acq_long
#define atomic_set_rel_ptr      atomic_set_rel_long
#define atomic_clear_ptr        atomic_clear_long
#define atomic_clear_acq_ptr    atomic_clear_acq_long
#define atomic_clear_rel_ptr    atomic_clear_rel_long
#define atomic_add_ptr          atomic_add_long
#define atomic_add_acq_ptr      atomic_add_acq_long
#define atomic_add_rel_ptr      atomic_add_rel_long
#define atomic_subtract_ptr     atomic_subtract_long
#define atomic_subtract_acq_ptr atomic_subtract_acq_long
#define atomic_subtract_rel_ptr atomic_subtract_rel_long
#define atomic_load_acq_ptr     atomic_load_acq_long
#define atomic_store_rel_ptr    atomic_store_rel_long
#define atomic_cmpset_ptr       atomic_cmpset_long
#define atomic_cmpset_acq_ptr   atomic_cmpset_acq_long
#define atomic_cmpset_rel_ptr   atomic_cmpset_rel_long
#define atomic_swap_ptr         atomic_swap_long
#define atomic_readandclear_ptr atomic_readandclear_long

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */
