FreeBSD/Linux Kernel Cross Reference
sys/amd64/include/atomic.h


/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/*
 * To express interprocessor (as opposed to processor and device) memory
 * ordering constraints, use the atomic_*() functions with acquire and release
 * semantics rather than the *mb() functions.  An architecture's memory
 * ordering (or memory consistency) model governs the order in which a
 * program's accesses to different locations may be performed by an
 * implementation of that architecture.  In general, for memory regions
 * defined as writeback cacheable, the memory ordering implemented by amd64
 * processors preserves the program ordering of a load followed by a load, a
 * load followed by a store, and a store followed by a store.  Only a store
 * followed by a load to a different memory location may be reordered.
 * Therefore, except for special cases, like non-temporal memory accesses or
 * memory regions defined as write combining, the memory ordering effects
 * provided by the sfence instruction in the wmb() function and the lfence
 * instruction in the rmb() function are redundant.  In contrast, the
 * atomic_*() functions with acquire and release semantics do not perform
 * redundant instructions for ordinary cases of interprocessor memory
 * ordering on any architecture.
 */
#define mb()    __asm __volatile("mfence;" : : : "memory")
#define wmb()   __asm __volatile("sfence;" : : : "memory")
#define rmb()   __asm __volatile("lfence;" : : : "memory")
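
/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): handing a data word from one CPU to another.  On amd64 the
 * release store and acquire load below already provide all of the
 * ordering required for ordinary writeback-cacheable memory; no
 * wmb()/rmb() is needed.  cpu_spinwait() is assumed to be the usual
 * pause hint from machine/cpu.h.
 *
 *      static u_int data, ready;
 *
 *      static void
 *      producer(void)
 *      {
 *              data = 42;
 *              atomic_store_rel_int(&ready, 1);  // data store ordered first
 *      }
 *
 *      static void
 *      consumer(void)
 *      {
 *              while (atomic_load_acq_int(&ready) == 0)
 *                      cpu_spinwait();           // flag load ordered first
 *              // here data is guaranteed to read 42
 *      }
 */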

#include <sys/atomic_common.h>

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)        (*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)      (*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)        (*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)   (*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)       (*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)     (*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)       (*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)  (*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)         (*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)       (*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)         (*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)    (*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)        (return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)   (return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)        (*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)      (*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)        (*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)   (*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)       (return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)  (return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
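
/*
 * Illustrative sketch (editor's addition): typical use of the simple
 * operations above on a flags word shared between CPUs.  F_BUSY and
 * F_DIRTY are made-up flag names.
 *
 *      static volatile u_int flags;
 *      #define F_BUSY  0x01
 *      #define F_DIRTY 0x02
 *
 *      atomic_set_int(&flags, F_DIRTY);   // flags |= F_DIRTY, atomically
 *      atomic_clear_int(&flags, F_BUSY);  // flags &= ~F_BUSY, atomically
 */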

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)                     \
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);  \
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int     atomic_cmpset_char(volatile u_char *dst, u_char expect, u_char src);
int     atomic_cmpset_short(volatile u_short *dst, u_short expect, u_short src);
int     atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int     atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src);
int     atomic_fcmpset_char(volatile u_char *dst, u_char *expect, u_char src);
int     atomic_fcmpset_short(volatile u_short *dst, u_short *expect,
            u_short src);
int     atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
int     atomic_fcmpset_long(volatile u_long *dst, u_long *expect, u_long src);
u_int   atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long  atomic_fetchadd_long(volatile u_long *p, u_long v);
int     atomic_testandset_int(volatile u_int *p, u_int v);
int     atomic_testandset_long(volatile u_long *p, u_int v);
int     atomic_testandclear_int(volatile u_int *p, u_int v);
int     atomic_testandclear_long(volatile u_long *p, u_int v);
void    atomic_thread_fence_acq(void);
void    atomic_thread_fence_acq_rel(void);
void    atomic_thread_fence_rel(void);
void    atomic_thread_fence_seq_cst(void);

#define ATOMIC_LOAD(TYPE)                                       \
u_##TYPE        atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define ATOMIC_STORE(TYPE)                                      \
void            atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED        "lock ; "
#else
#define MPLOCKED
#endif
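
/*
 * For example (editor's note), with SMP defined the instantiation
 * ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) below yields an inline
 * atomic_add_int() whose body is roughly:
 *
 *      __asm __volatile("lock ; addl %1,%0" : "+m" (*p) : "ir" (v) : "cc");
 */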

/*
 * The assembly is marked volatile so that the compiler cannot discard
 * it as dead code.  GCC aggressively reorders operations, so a memory
 * clobber is also necessary for the barrier variants to actually
 * constrain the compiler.
 */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)             \
static __inline void                                    \
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(MPLOCKED OP                    \
        : "+m" (*p)                                     \
        : CONS (V)                                      \
        : "cc");                                        \
}                                                       \
                                                        \
static __inline void                                    \
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(MPLOCKED OP                    \
        : "+m" (*p)                                     \
        : CONS (V)                                      \
        : "memory", "cc");                              \
}                                                       \
struct __hack

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * cmpset:
 *      if (*dst == expect)
 *              *dst = src
 *
 * fcmpset:
 *      if (*dst == *expect)
 *              *dst = src
 *      else
 *              *expect = *dst
 *
 * Returns 0 on failure, non-zero on success.
 */
#define ATOMIC_CMPSET(TYPE)                             \
static __inline int                                     \
atomic_cmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE expect, u_##TYPE src) \
{                                                       \
        u_char res;                                     \
                                                        \
        __asm __volatile(                               \
        "       " MPLOCKED "            "               \
        "       cmpxchg %3,%1 ; "                       \
        "       sete    %0 ;            "               \
        "# atomic_cmpset_" #TYPE "      "               \
        : "=q" (res),                   /* 0 */         \
          "+m" (*dst),                  /* 1 */         \
          "+a" (expect)                 /* 2 */         \
        : "r" (src)                     /* 3 */         \
        : "memory", "cc");                              \
        return (res);                                   \
}                                                       \
                                                        \
static __inline int                                     \
atomic_fcmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE *expect, u_##TYPE src) \
{                                                       \
        u_char res;                                     \
                                                        \
        __asm __volatile(                               \
        "       " MPLOCKED "            "               \
        "       cmpxchg %3,%1 ;         "               \
        "       sete    %0 ;            "               \
        "# atomic_fcmpset_" #TYPE "     "               \
        : "=q" (res),                   /* 0 */         \
          "+m" (*dst),                  /* 1 */         \
          "+a" (*expect)                /* 2 */         \
        : "r" (src)                     /* 3 */         \
        : "memory", "cc");                              \
        return (res);                                   \
}

ATOMIC_CMPSET(char);
ATOMIC_CMPSET(short);
ATOMIC_CMPSET(int);
ATOMIC_CMPSET(long);
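
/*
 * Illustrative sketch (editor's addition): a lock-free update loop.
 * On failure, fcmpset has already reloaded the current value into
 * *expect, so the retry path needs no extra read of *dst.
 *
 *      static u_int
 *      saturating_increment(volatile u_int *p, u_int max)
 *      {
 *              u_int old;
 *
 *              old = *p;
 *              do {
 *                      if (old == max)
 *                              return (old);   // already saturated
 *              } while (!atomic_fcmpset_int(p, &old, old + 1));
 *              return (old);                   // value before the increment
 *      }
 */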

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

        __asm __volatile(
        "       " MPLOCKED "            "
        "       xaddl   %0,%1 ;         "
        "# atomic_fetchadd_int"
        : "+r" (v),                     /* 0 */
          "+m" (*p)                     /* 1 */
        : : "cc");
        return (v);
}

/*
 * Atomically add the value of v to the long integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

        __asm __volatile(
        "       " MPLOCKED "            "
        "       xaddq   %0,%1 ;         "
        "# atomic_fetchadd_long"
        : "+r" (v),                     /* 0 */
          "+m" (*p)                     /* 1 */
        : : "cc");
        return (v);
}
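
/*
 * Illustrative sketch (editor's addition): because fetchadd returns
 * the value *before* the addition, each caller receives a unique,
 * monotonically increasing value, i.e. a ticket dispenser.
 *
 *      static volatile u_int next_ticket;
 *
 *      static u_int
 *      take_ticket(void)
 *      {
 *              return (atomic_fetchadd_int(&next_ticket, 1));
 *      }
 */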

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       btsl    %2,%1 ;         "
        "       setc    %0 ;            "
        "# atomic_testandset_int"
        : "=q" (res),                   /* 0 */
          "+m" (*p)                     /* 1 */
        : "Ir" (v & 0x1f)               /* 2 */
        : "cc");
        return (res);
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       btsq    %2,%1 ;         "
        "       setc    %0 ;            "
        "# atomic_testandset_long"
        : "=q" (res),                   /* 0 */
          "+m" (*p)                     /* 1 */
        : "Jr" ((u_long)(v & 0x3f))     /* 2 */
        : "cc");
        return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       btrl    %2,%1 ;         "
        "       setc    %0 ;            "
        "# atomic_testandclear_int"
        : "=q" (res),                   /* 0 */
          "+m" (*p)                     /* 1 */
        : "Ir" (v & 0x1f)               /* 2 */
        : "cc");
        return (res);
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       btrq    %2,%1 ;         "
        "       setc    %0 ;            "
        "# atomic_testandclear_long"
        : "=q" (res),                   /* 0 */
          "+m" (*p)                     /* 1 */
        : "Jr" ((u_long)(v & 0x3f))     /* 2 */
        : "cc");
        return (res);
}
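
/*
 * Illustrative sketch (editor's addition): the testand* operations
 * return the previous state of the bit, so "claim this slot if it is
 * free" is a single atomic step, e.g. in a small bitmap allocator:
 *
 *      static volatile u_long slotmap;
 *
 *      static int
 *      alloc_slot(void)
 *      {
 *              int bit;
 *
 *              for (bit = 0; bit < 64; bit++)
 *                      if (atomic_testandset_long(&slotmap, bit) == 0)
 *                              return (bit);   // bit was clear; now ours
 *              return (-1);                    // all slots taken
 *      }
 *
 *      static void
 *      free_slot(int bit)
 *      {
 *              (void)atomic_testandclear_long(&slotmap, bit);
 *      }
 */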

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  To avoid false data dependencies, we use a
 * special address for "mem".  In the kernel, we use a private per-cpu
 * cache line.  In user space, we use a word in the stack's red zone
 * (-8(%rsp)).
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */

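/*
 * Illustrative sketch (editor's addition) of the Store/Load case the
 * comment above describes: each CPU stores to its own flag, then loads
 * the other CPU's flag.  Without a sequentially consistent fence the
 * load may be satisfied before the store becomes globally visible, and
 * both CPUs can read 0.
 *
 *      // CPU 0                        // CPU 1
 *      atomic_store_rel_int(&f0, 1);   atomic_store_rel_int(&f1, 1);
 *      atomic_thread_fence_seq_cst();  atomic_thread_fence_seq_cst();
 *      r0 = atomic_load_acq_int(&f1);  r1 = atomic_load_acq_int(&f0);
 *
 * With the fences, the outcome r0 == 0 && r1 == 0 is impossible;
 * without them the architecture permits it.
 */
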
#if defined(_KERNEL)

/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in amd64/vm_machdep.c ensures that the value is correct.
 */
#define OFFSETOF_MONITORBUF     0x180

#if defined(SMP)
static __inline void
__storeload_barrier(void)
{

        __asm __volatile("lock; addl $0,%%gs:%0"
            : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
}
#else /* _KERNEL && UP */
static __inline void
__storeload_barrier(void)
{

        __compiler_membar();
}
#endif /* SMP */
#else /* !_KERNEL */
static __inline void
__storeload_barrier(void)
{

        __asm __volatile("lock; addl $0,-8(%%rsp)" : : : "memory", "cc");
}
#endif /* _KERNEL */

#define ATOMIC_LOAD(TYPE)                                       \
static __inline u_##TYPE                                        \
atomic_load_acq_##TYPE(volatile u_##TYPE *p)                    \
{                                                               \
        u_##TYPE res;                                           \
                                                                \
        res = *p;                                               \
        __compiler_membar();                                    \
        return (res);                                           \
}                                                               \
struct __hack

#define ATOMIC_STORE(TYPE)                                      \
static __inline void                                            \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)       \
{                                                               \
                                                                \
        __compiler_membar();                                    \
        *p = v;                                                 \
}                                                               \
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

        __compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

        __compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

        __compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

        __storeload_barrier();
}

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,      char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,      char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,      short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,      short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,      int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,      int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,      long,  "orq %1,%0",   "er",  v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "er", ~v);
ATOMIC_ASM(add,      long,  "addq %1,%0",  "er",  v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "er",  v);

#define ATOMIC_LOADSTORE(TYPE)                                  \
        ATOMIC_LOAD(TYPE);                                      \
        ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE
#ifndef WANT_FUNCTIONS

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

        __asm __volatile(
        "       xchgl   %1,%0 ;         "
        "# atomic_swap_int"
        : "+r" (v),                     /* 0 */
          "+m" (*p));                   /* 1 */
        return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

        __asm __volatile(
        "       xchgq   %1,%0 ;         "
        "# atomic_swap_long"
        : "+r" (v),                     /* 0 */
          "+m" (*p));                   /* 1 */
        return (v);
}

#else /* !__GNUCLIKE_ASM */

u_int   atomic_swap_int(volatile u_int *p, u_int v);
u_long  atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define atomic_set_acq_char             atomic_set_barr_char
#define atomic_set_rel_char             atomic_set_barr_char
#define atomic_clear_acq_char           atomic_clear_barr_char
#define atomic_clear_rel_char           atomic_clear_barr_char
#define atomic_add_acq_char             atomic_add_barr_char
#define atomic_add_rel_char             atomic_add_barr_char
#define atomic_subtract_acq_char        atomic_subtract_barr_char
#define atomic_subtract_rel_char        atomic_subtract_barr_char
#define atomic_cmpset_acq_char          atomic_cmpset_char
#define atomic_cmpset_rel_char          atomic_cmpset_char
#define atomic_fcmpset_acq_char         atomic_fcmpset_char
#define atomic_fcmpset_rel_char         atomic_fcmpset_char

#define atomic_set_acq_short            atomic_set_barr_short
#define atomic_set_rel_short            atomic_set_barr_short
#define atomic_clear_acq_short          atomic_clear_barr_short
#define atomic_clear_rel_short          atomic_clear_barr_short
#define atomic_add_acq_short            atomic_add_barr_short
#define atomic_add_rel_short            atomic_add_barr_short
#define atomic_subtract_acq_short       atomic_subtract_barr_short
#define atomic_subtract_rel_short       atomic_subtract_barr_short
#define atomic_cmpset_acq_short         atomic_cmpset_short
#define atomic_cmpset_rel_short         atomic_cmpset_short
#define atomic_fcmpset_acq_short        atomic_fcmpset_short
#define atomic_fcmpset_rel_short        atomic_fcmpset_short

#define atomic_set_acq_int              atomic_set_barr_int
#define atomic_set_rel_int              atomic_set_barr_int
#define atomic_clear_acq_int            atomic_clear_barr_int
#define atomic_clear_rel_int            atomic_clear_barr_int
#define atomic_add_acq_int              atomic_add_barr_int
#define atomic_add_rel_int              atomic_add_barr_int
#define atomic_subtract_acq_int         atomic_subtract_barr_int
#define atomic_subtract_rel_int         atomic_subtract_barr_int
#define atomic_cmpset_acq_int           atomic_cmpset_int
#define atomic_cmpset_rel_int           atomic_cmpset_int
#define atomic_fcmpset_acq_int          atomic_fcmpset_int
#define atomic_fcmpset_rel_int          atomic_fcmpset_int

#define atomic_set_acq_long             atomic_set_barr_long
#define atomic_set_rel_long             atomic_set_barr_long
#define atomic_clear_acq_long           atomic_clear_barr_long
#define atomic_clear_rel_long           atomic_clear_barr_long
#define atomic_add_acq_long             atomic_add_barr_long
#define atomic_add_rel_long             atomic_add_barr_long
#define atomic_subtract_acq_long        atomic_subtract_barr_long
#define atomic_subtract_rel_long        atomic_subtract_barr_long
#define atomic_cmpset_acq_long          atomic_cmpset_long
#define atomic_cmpset_rel_long          atomic_cmpset_long
#define atomic_fcmpset_acq_long         atomic_fcmpset_long
#define atomic_fcmpset_rel_long         atomic_fcmpset_long

#define atomic_readandclear_int(p)      atomic_swap_int(p, 0)
#define atomic_readandclear_long(p)     atomic_swap_long(p, 0)
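
/*
 * Illustrative sketch (editor's addition): readandclear fetches and
 * zeroes a word in one atomic step, so pending event bits posted by
 * other CPUs are never lost between the read and the clear.
 * handle_event() is a made-up consumer; ffs() is assumed available
 * (libkern in the kernel, strings.h in userland).
 *
 *      static volatile u_int pending;
 *
 *      static void
 *      dispatch_events(void)
 *      {
 *              u_int ev;
 *              int bit;
 *
 *              ev = atomic_readandclear_int(&pending);
 *              while (ev != 0) {
 *                      bit = ffs(ev) - 1;
 *                      handle_event(bit);
 *                      ev &= ~(1u << bit);
 *              }
 *      }
 */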

/* Operations on 8-bit bytes. */
#define atomic_set_8            atomic_set_char
#define atomic_set_acq_8        atomic_set_acq_char
#define atomic_set_rel_8        atomic_set_rel_char
#define atomic_clear_8          atomic_clear_char
#define atomic_clear_acq_8      atomic_clear_acq_char
#define atomic_clear_rel_8      atomic_clear_rel_char
#define atomic_add_8            atomic_add_char
#define atomic_add_acq_8        atomic_add_acq_char
#define atomic_add_rel_8        atomic_add_rel_char
#define atomic_subtract_8       atomic_subtract_char
#define atomic_subtract_acq_8   atomic_subtract_acq_char
#define atomic_subtract_rel_8   atomic_subtract_rel_char
#define atomic_load_acq_8       atomic_load_acq_char
#define atomic_store_rel_8      atomic_store_rel_char
#define atomic_cmpset_8         atomic_cmpset_char
#define atomic_cmpset_acq_8     atomic_cmpset_acq_char
#define atomic_cmpset_rel_8     atomic_cmpset_rel_char
#define atomic_fcmpset_8        atomic_fcmpset_char
#define atomic_fcmpset_acq_8    atomic_fcmpset_acq_char
#define atomic_fcmpset_rel_8    atomic_fcmpset_rel_char

/* Operations on 16-bit words. */
#define atomic_set_16           atomic_set_short
#define atomic_set_acq_16       atomic_set_acq_short
#define atomic_set_rel_16       atomic_set_rel_short
#define atomic_clear_16         atomic_clear_short
#define atomic_clear_acq_16     atomic_clear_acq_short
#define atomic_clear_rel_16     atomic_clear_rel_short
#define atomic_add_16           atomic_add_short
#define atomic_add_acq_16       atomic_add_acq_short
#define atomic_add_rel_16       atomic_add_rel_short
#define atomic_subtract_16      atomic_subtract_short
#define atomic_subtract_acq_16  atomic_subtract_acq_short
#define atomic_subtract_rel_16  atomic_subtract_rel_short
#define atomic_load_acq_16      atomic_load_acq_short
#define atomic_store_rel_16     atomic_store_rel_short
#define atomic_cmpset_16        atomic_cmpset_short
#define atomic_cmpset_acq_16    atomic_cmpset_acq_short
#define atomic_cmpset_rel_16    atomic_cmpset_rel_short
#define atomic_fcmpset_16       atomic_fcmpset_short
#define atomic_fcmpset_acq_16   atomic_fcmpset_acq_short
#define atomic_fcmpset_rel_16   atomic_fcmpset_rel_short

/* Operations on 32-bit double words. */
#define atomic_set_32           atomic_set_int
#define atomic_set_acq_32       atomic_set_acq_int
#define atomic_set_rel_32       atomic_set_rel_int
#define atomic_clear_32         atomic_clear_int
#define atomic_clear_acq_32     atomic_clear_acq_int
#define atomic_clear_rel_32     atomic_clear_rel_int
#define atomic_add_32           atomic_add_int
#define atomic_add_acq_32       atomic_add_acq_int
#define atomic_add_rel_32       atomic_add_rel_int
#define atomic_subtract_32      atomic_subtract_int
#define atomic_subtract_acq_32  atomic_subtract_acq_int
#define atomic_subtract_rel_32  atomic_subtract_rel_int
#define atomic_load_acq_32      atomic_load_acq_int
#define atomic_store_rel_32     atomic_store_rel_int
#define atomic_cmpset_32        atomic_cmpset_int
#define atomic_cmpset_acq_32    atomic_cmpset_acq_int
#define atomic_cmpset_rel_32    atomic_cmpset_rel_int
#define atomic_fcmpset_32       atomic_fcmpset_int
#define atomic_fcmpset_acq_32   atomic_fcmpset_acq_int
#define atomic_fcmpset_rel_32   atomic_fcmpset_rel_int
#define atomic_swap_32          atomic_swap_int
#define atomic_readandclear_32  atomic_readandclear_int
#define atomic_fetchadd_32      atomic_fetchadd_int
#define atomic_testandset_32    atomic_testandset_int
#define atomic_testandclear_32  atomic_testandclear_int

/* Operations on 64-bit quad words. */
#define atomic_set_64           atomic_set_long
#define atomic_set_acq_64       atomic_set_acq_long
#define atomic_set_rel_64       atomic_set_rel_long
#define atomic_clear_64         atomic_clear_long
#define atomic_clear_acq_64     atomic_clear_acq_long
#define atomic_clear_rel_64     atomic_clear_rel_long
#define atomic_add_64           atomic_add_long
#define atomic_add_acq_64       atomic_add_acq_long
#define atomic_add_rel_64       atomic_add_rel_long
#define atomic_subtract_64      atomic_subtract_long
#define atomic_subtract_acq_64  atomic_subtract_acq_long
#define atomic_subtract_rel_64  atomic_subtract_rel_long
#define atomic_load_acq_64      atomic_load_acq_long
#define atomic_store_rel_64     atomic_store_rel_long
#define atomic_cmpset_64        atomic_cmpset_long
#define atomic_cmpset_acq_64    atomic_cmpset_acq_long
#define atomic_cmpset_rel_64    atomic_cmpset_rel_long
#define atomic_fcmpset_64       atomic_fcmpset_long
#define atomic_fcmpset_acq_64   atomic_fcmpset_acq_long
#define atomic_fcmpset_rel_64   atomic_fcmpset_rel_long
#define atomic_swap_64          atomic_swap_long
#define atomic_readandclear_64  atomic_readandclear_long
#define atomic_fetchadd_64      atomic_fetchadd_long
#define atomic_testandset_64    atomic_testandset_long
#define atomic_testandclear_64  atomic_testandclear_long

/* Operations on pointers. */
#define atomic_set_ptr          atomic_set_long
#define atomic_set_acq_ptr      atomic_set_acq_long
#define atomic_set_rel_ptr      atomic_set_rel_long
#define atomic_clear_ptr        atomic_clear_long
#define atomic_clear_acq_ptr    atomic_clear_acq_long
#define atomic_clear_rel_ptr    atomic_clear_rel_long
#define atomic_add_ptr          atomic_add_long
#define atomic_add_acq_ptr      atomic_add_acq_long
#define atomic_add_rel_ptr      atomic_add_rel_long
#define atomic_subtract_ptr     atomic_subtract_long
#define atomic_subtract_acq_ptr atomic_subtract_acq_long
#define atomic_subtract_rel_ptr atomic_subtract_rel_long
#define atomic_load_acq_ptr     atomic_load_acq_long
#define atomic_store_rel_ptr    atomic_store_rel_long
#define atomic_cmpset_ptr       atomic_cmpset_long
#define atomic_cmpset_acq_ptr   atomic_cmpset_acq_long
#define atomic_cmpset_rel_ptr   atomic_cmpset_rel_long
#define atomic_fcmpset_ptr      atomic_fcmpset_long
#define atomic_fcmpset_acq_ptr  atomic_fcmpset_acq_long
#define atomic_fcmpset_rel_ptr  atomic_fcmpset_rel_long
#define atomic_swap_ptr         atomic_swap_long
#define atomic_readandclear_ptr atomic_readandclear_long

#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */
