FreeBSD/Linux Kernel Cross Reference
sys/amd64/include/atomic.h


/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/8.3/sys/amd64/include/atomic.h 216869 2011-01-01 08:47:38Z kib $
 */
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#define mb()    __asm __volatile("mfence;" : : : "memory")
#define wmb()   __asm __volatile("sfence;" : : : "memory")
#define rmb()   __asm __volatile("lfence;" : : : "memory")

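/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): mb() provides the store-load ordering that neither sfence
 * nor lfence gives on x86, as required by Dekker-style mutual
 * exclusion.  The function and variable names are hypothetical.
 */
static __inline int
example_try_enter(volatile u_int *my_flag, volatile u_int *other_flag)
{

        *my_flag = 1;                   /* announce intent to enter */
        mb();                           /* order the store before the load */
        return (*other_flag == 0);      /* safe only if the peer is absent */
}
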
/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)        (*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)      (*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)        (*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)   (*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)       (*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)     (*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)       (*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)  (*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)         (*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)       (*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)         (*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)    (*(u_int *)(P) -= (V))
 * atomic_readandclear_int(P)   (tmp = *(u_int *)(P); *(u_int *)(P) = 0; return (tmp);)
 *
 * atomic_set_long(P, V)        (*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)      (*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)        (*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)   (*(u_long *)(P) -= (V))
 * atomic_readandclear_long(P)  (tmp = *(u_long *)(P); *(u_long *)(P) = 0; return (tmp);)
 */
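
/*
 * Illustrative usage sketch (editorial addition; the variable and flag
 * value are hypothetical):
 *
 *      volatile u_int flags;
 *
 *      atomic_set_int(&flags, 0x01);           set bit 0 atomically
 *      atomic_clear_int(&flags, 0x01);         clear it atomically
 *
 * A plain "flags |= 0x01" would compile to a non-atomic read-modify-write
 * and could lose concurrent updates from other CPUs.
 */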

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)                     \
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);  \
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int     atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
int     atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src);
u_int   atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long  atomic_fetchadd_long(volatile u_long *p, u_long v);

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)                       \
u_##TYPE        atomic_load_acq_##TYPE(volatile u_##TYPE *p);   \
void            atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED        "lock ; "
#else
#define MPLOCKED
#endif

/*
 * The assembly is marked volatile to prevent the compiler from removing
 * it as dead code.  GCC also reorders operations aggressively, so the
 * "memory" clobber is required for the barrier variants to actually
 * constrain surrounding memory accesses.
 */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)             \
static __inline void                                    \
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(MPLOCKED OP                    \
        : "=m" (*p)                                     \
        : CONS (V), "m" (*p)                            \
        : "cc");                                        \
}                                                       \
                                                        \
static __inline void                                    \
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(MPLOCKED OP                    \
        : "=m" (*p)                                     \
        : CONS (V), "m" (*p)                            \
        : "memory", "cc");                              \
}                                                       \
struct __hack

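/*
 * For reference (editorial addition): with MPLOCKED defined as "lock ; ",
 * the instantiation ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) further
 * below expands to approximately
 *
 *      static __inline void
 *      atomic_add_int(volatile u_int *p, u_int v)
 *      {
 *              __asm __volatile("lock ; addl %1,%0"
 *              : "=m" (*p)
 *              : "ir" (v), "m" (*p)
 *              : "cc");
 *      }
 *
 * plus an atomic_add_barr_int() twin that additionally clobbers "memory".
 */
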
/*
 * Atomic compare and set, used by the mutex functions.
 *
 * if (*dst == exp) *dst = src (as one atomic 32- or 64-bit operation)
 *
 * Returns 0 on failure, non-zero on success.
 */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       cmpxchgl %2,%1 ;        "
        "       sete    %0 ;            "
        "1:                             "
        "# atomic_cmpset_int"
        : "=a" (res),                   /* 0 */
          "=m" (*dst)                   /* 1 */
        : "r" (src),                    /* 2 */
          "a" (exp),                    /* 3 */
          "m" (*dst)                    /* 4 */
        : "memory", "cc");

        return (res);
}

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long exp, u_long src)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       cmpxchgq %2,%1 ;        "
        "       sete    %0 ;            "
        "1:                             "
        "# atomic_cmpset_long"
        : "=a" (res),                   /* 0 */
          "=m" (*dst)                   /* 1 */
        : "r" (src),                    /* 2 */
          "a" (exp),                    /* 3 */
          "m" (*dst)                    /* 4 */
        : "memory", "cc");

        return (res);
}

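/*
 * Illustrative sketch (editorial addition): the canonical retry loop
 * built on compare-and-set, here atomically OR-ing bits into *p.  The
 * name example_or_int is hypothetical; the header's own atomic_set_int
 * does this job directly.
 */
static __inline void
example_or_int(volatile u_int *p, u_int v)
{
        u_int old;

        do {
                old = *p;               /* snapshot the current value */
        } while (atomic_cmpset_int(p, old, old | v) == 0);
}
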
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

        __asm __volatile(
        "       " MPLOCKED "            "
        "       xaddl   %0, %1 ;        "
        "# atomic_fetchadd_int"
        : "+r" (v),                     /* 0 (result) */
          "=m" (*p)                     /* 1 */
        : "m" (*p)                      /* 2 */
        : "cc");
        return (v);
}

/*
 * Atomically add the value of v to the long integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

        __asm __volatile(
        "       " MPLOCKED "            "
        "       xaddq   %0, %1 ;        "
        "# atomic_fetchadd_long"
        : "+r" (v),                     /* 0 (result) */
          "=m" (*p)                     /* 1 */
        : "m" (*p)                      /* 2 */
        : "cc");
        return (v);
}

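/*
 * Illustrative sketch (editorial addition): fetchadd is the natural
 * primitive for handing out unique, monotonically increasing ticket
 * numbers.  The name example_next_ticket is hypothetical.
 */
static __inline u_int
example_next_ticket(volatile u_int *counter)
{

        return (atomic_fetchadd_int(counter, 1));       /* old value is the ticket */
}
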
#if defined(_KERNEL) && !defined(SMP)

/*
 * We assume that a = b will do atomic loads and stores.  However, on a
 * PentiumPro or higher, reads may pass writes, so for that case we have
 * to use a serializing instruction (i.e. with LOCK) to do the load in
 * SMP kernels.  For UP kernels, however, the cache of the single processor
 * is always consistent, so we only need to take care of the compiler.
 */
#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)               \
static __inline u_##TYPE                                \
atomic_load_acq_##TYPE(volatile u_##TYPE *p)            \
{                                                       \
        u_##TYPE tmp;                                   \
                                                        \
        tmp = *p;                                       \
        __asm __volatile ("" : : : "memory");           \
        return (tmp);                                   \
}                                                       \
                                                        \
static __inline void                                    \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile ("" : : : "memory");           \
        *p = v;                                         \
}                                                       \
struct __hack

#else /* !(_KERNEL && !SMP) */

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)               \
static __inline u_##TYPE                                \
atomic_load_acq_##TYPE(volatile u_##TYPE *p)            \
{                                                       \
        u_##TYPE res;                                   \
                                                        \
        __asm __volatile(MPLOCKED LOP                   \
        : "=a" (res),                   /* 0 */         \
          "=m" (*p)                     /* 1 */         \
        : "m" (*p)                      /* 2 */         \
        : "memory", "cc");                              \
                                                        \
        return (res);                                   \
}                                                       \
                                                        \
/*                                                      \
 * The XCHG instruction asserts LOCK automagically.     \
 */                                                     \
static __inline void                                    \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(SOP                            \
        : "=m" (*p),                    /* 0 */         \
          "+r" (v)                      /* 1 */         \
        : "m" (*p)                      /* 2 */         \
        : "memory");                                    \
}                                                       \
struct __hack

#endif /* _KERNEL && !SMP */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,      char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,      char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,      short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,      short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,      int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,      int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,      long,  "orq %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "ir", ~v);
ATOMIC_ASM(add,      long,  "addq %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "ir",  v);

ATOMIC_STORE_LOAD(char, "cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,  "cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long, "cmpxchgq %0,%1",  "xchgq %1,%0");

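/*
 * Illustrative sketch (editorial addition): the load_acq/store_rel pair
 * instantiated above expresses the classic publication pattern.  The
 * function and variable names are hypothetical.
 */
static __inline void
example_publish(volatile u_int *data, volatile u_int *ready, u_int v)
{

        *data = v;                      /* write the payload */
        atomic_store_rel_int(ready, 1); /* release: payload visible first */
}

static __inline u_int
example_consume(volatile u_int *data, volatile u_int *ready)
{

        while (atomic_load_acq_int(ready) == 0)
                ;                       /* spin until published */
        return (*data);                 /* acquire above orders this load */
}
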
#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD

#ifndef WANT_FUNCTIONS

/* Read the current value and store a zero in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
        u_int res;

        res = 0;
        __asm __volatile(
        "       xchgl   %1,%0 ;         "
        "# atomic_readandclear_int"
        : "+r" (res),                   /* 0 */
          "=m" (*addr)                  /* 1 */
        : "m" (*addr));

        return (res);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
        u_long res;

        res = 0;
        __asm __volatile(
        "       xchgq   %1,%0 ;         "
        "# atomic_readandclear_long"
        : "+r" (res),                   /* 0 */
          "=m" (*addr)                  /* 1 */
        : "m" (*addr));

        return (res);
}

#else /* !__GNUCLIKE_ASM */

u_int   atomic_readandclear_int(volatile u_int *addr);
u_long  atomic_readandclear_long(volatile u_long *addr);

#endif /* __GNUCLIKE_ASM */

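/*
 * Illustrative sketch (editorial addition): read-and-clear is the natural
 * way to consume a mask of pending events in one atomic step.  The names
 * are hypothetical.
 */
static __inline void
example_drain_pending(volatile u_int *pending, void (*handle)(u_int))
{
        u_int ev;

        /* Bits set after each swap are picked up on the next pass. */
        while ((ev = atomic_readandclear_int(pending)) != 0)
                handle(ev);
}

/*
 * On amd64 every LOCKed read-modify-write instruction is a full barrier,
 * so the acquire and release variants of each operation below can share
 * the single _barr_ implementation defined above.
 */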
#define atomic_set_acq_char             atomic_set_barr_char
#define atomic_set_rel_char             atomic_set_barr_char
#define atomic_clear_acq_char           atomic_clear_barr_char
#define atomic_clear_rel_char           atomic_clear_barr_char
#define atomic_add_acq_char             atomic_add_barr_char
#define atomic_add_rel_char             atomic_add_barr_char
#define atomic_subtract_acq_char        atomic_subtract_barr_char
#define atomic_subtract_rel_char        atomic_subtract_barr_char

#define atomic_set_acq_short            atomic_set_barr_short
#define atomic_set_rel_short            atomic_set_barr_short
#define atomic_clear_acq_short          atomic_clear_barr_short
#define atomic_clear_rel_short          atomic_clear_barr_short
#define atomic_add_acq_short            atomic_add_barr_short
#define atomic_add_rel_short            atomic_add_barr_short
#define atomic_subtract_acq_short       atomic_subtract_barr_short
#define atomic_subtract_rel_short       atomic_subtract_barr_short

#define atomic_set_acq_int              atomic_set_barr_int
#define atomic_set_rel_int              atomic_set_barr_int
#define atomic_clear_acq_int            atomic_clear_barr_int
#define atomic_clear_rel_int            atomic_clear_barr_int
#define atomic_add_acq_int              atomic_add_barr_int
#define atomic_add_rel_int              atomic_add_barr_int
#define atomic_subtract_acq_int         atomic_subtract_barr_int
#define atomic_subtract_rel_int         atomic_subtract_barr_int
#define atomic_cmpset_acq_int           atomic_cmpset_int
#define atomic_cmpset_rel_int           atomic_cmpset_int

#define atomic_set_acq_long             atomic_set_barr_long
#define atomic_set_rel_long             atomic_set_barr_long
#define atomic_clear_acq_long           atomic_clear_barr_long
#define atomic_clear_rel_long           atomic_clear_barr_long
#define atomic_add_acq_long             atomic_add_barr_long
#define atomic_add_rel_long             atomic_add_barr_long
#define atomic_subtract_acq_long        atomic_subtract_barr_long
#define atomic_subtract_rel_long        atomic_subtract_barr_long
#define atomic_cmpset_acq_long          atomic_cmpset_long
#define atomic_cmpset_rel_long          atomic_cmpset_long

/* Operations on 8-bit bytes. */
#define atomic_set_8            atomic_set_char
#define atomic_set_acq_8        atomic_set_acq_char
#define atomic_set_rel_8        atomic_set_rel_char
#define atomic_clear_8          atomic_clear_char
#define atomic_clear_acq_8      atomic_clear_acq_char
#define atomic_clear_rel_8      atomic_clear_rel_char
#define atomic_add_8            atomic_add_char
#define atomic_add_acq_8        atomic_add_acq_char
#define atomic_add_rel_8        atomic_add_rel_char
#define atomic_subtract_8       atomic_subtract_char
#define atomic_subtract_acq_8   atomic_subtract_acq_char
#define atomic_subtract_rel_8   atomic_subtract_rel_char
#define atomic_load_acq_8       atomic_load_acq_char
#define atomic_store_rel_8      atomic_store_rel_char

/* Operations on 16-bit words. */
#define atomic_set_16           atomic_set_short
#define atomic_set_acq_16       atomic_set_acq_short
#define atomic_set_rel_16       atomic_set_rel_short
#define atomic_clear_16         atomic_clear_short
#define atomic_clear_acq_16     atomic_clear_acq_short
#define atomic_clear_rel_16     atomic_clear_rel_short
#define atomic_add_16           atomic_add_short
#define atomic_add_acq_16       atomic_add_acq_short
#define atomic_add_rel_16       atomic_add_rel_short
#define atomic_subtract_16      atomic_subtract_short
#define atomic_subtract_acq_16  atomic_subtract_acq_short
#define atomic_subtract_rel_16  atomic_subtract_rel_short
#define atomic_load_acq_16      atomic_load_acq_short
#define atomic_store_rel_16     atomic_store_rel_short

/* Operations on 32-bit double words. */
#define atomic_set_32           atomic_set_int
#define atomic_set_acq_32       atomic_set_acq_int
#define atomic_set_rel_32       atomic_set_rel_int
#define atomic_clear_32         atomic_clear_int
#define atomic_clear_acq_32     atomic_clear_acq_int
#define atomic_clear_rel_32     atomic_clear_rel_int
#define atomic_add_32           atomic_add_int
#define atomic_add_acq_32       atomic_add_acq_int
#define atomic_add_rel_32       atomic_add_rel_int
#define atomic_subtract_32      atomic_subtract_int
#define atomic_subtract_acq_32  atomic_subtract_acq_int
#define atomic_subtract_rel_32  atomic_subtract_rel_int
#define atomic_load_acq_32      atomic_load_acq_int
#define atomic_store_rel_32     atomic_store_rel_int
#define atomic_cmpset_32        atomic_cmpset_int
#define atomic_cmpset_acq_32    atomic_cmpset_acq_int
#define atomic_cmpset_rel_32    atomic_cmpset_rel_int
#define atomic_readandclear_32  atomic_readandclear_int
#define atomic_fetchadd_32      atomic_fetchadd_int

/* Operations on 64-bit quad words. */
#define atomic_set_64           atomic_set_long
#define atomic_set_acq_64       atomic_set_acq_long
#define atomic_set_rel_64       atomic_set_rel_long
#define atomic_clear_64         atomic_clear_long
#define atomic_clear_acq_64     atomic_clear_acq_long
#define atomic_clear_rel_64     atomic_clear_rel_long
#define atomic_add_64           atomic_add_long
#define atomic_add_acq_64       atomic_add_acq_long
#define atomic_add_rel_64       atomic_add_rel_long
#define atomic_subtract_64      atomic_subtract_long
#define atomic_subtract_acq_64  atomic_subtract_acq_long
#define atomic_subtract_rel_64  atomic_subtract_rel_long
#define atomic_load_acq_64      atomic_load_acq_long
#define atomic_store_rel_64     atomic_store_rel_long
#define atomic_cmpset_64        atomic_cmpset_long
#define atomic_cmpset_acq_64    atomic_cmpset_acq_long
#define atomic_cmpset_rel_64    atomic_cmpset_rel_long
#define atomic_readandclear_64  atomic_readandclear_long

/* Operations on pointers. */
#define atomic_set_ptr          atomic_set_long
#define atomic_set_acq_ptr      atomic_set_acq_long
#define atomic_set_rel_ptr      atomic_set_rel_long
#define atomic_clear_ptr        atomic_clear_long
#define atomic_clear_acq_ptr    atomic_clear_acq_long
#define atomic_clear_rel_ptr    atomic_clear_rel_long
#define atomic_add_ptr          atomic_add_long
#define atomic_add_acq_ptr      atomic_add_acq_long
#define atomic_add_rel_ptr      atomic_add_rel_long
#define atomic_subtract_ptr     atomic_subtract_long
#define atomic_subtract_acq_ptr atomic_subtract_acq_long
#define atomic_subtract_rel_ptr atomic_subtract_rel_long
#define atomic_load_acq_ptr     atomic_load_acq_long
#define atomic_store_rel_ptr    atomic_store_rel_long
#define atomic_cmpset_ptr       atomic_cmpset_long
#define atomic_cmpset_acq_ptr   atomic_cmpset_acq_long
#define atomic_cmpset_rel_ptr   atomic_cmpset_rel_long
#define atomic_readandclear_ptr atomic_readandclear_long

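/*
 * Illustrative sketch (editorial addition): atomic_cmpset_ptr supports
 * simple lock-free structures such as a LIFO push.  The struct and names
 * are hypothetical; the casts reflect that the pointer operations above
 * are aliases for their u_long counterparts.
 */
struct example_node {
        struct example_node *next;
};

static __inline void
example_lifo_push(struct example_node * volatile *head, struct example_node *n)
{
        struct example_node *old;

        do {
                old = *head;            /* current top of the stack */
                n->next = old;
        } while (atomic_cmpset_ptr((volatile u_long *)head,
            (u_long)old, (u_long)n) == 0);
}
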
#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.