FreeBSD/Linux Kernel Cross Reference
sys/i386/include/atomic.h (FreeBSD releng/9.0)

/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/9.0/sys/i386/include/atomic.h 220404 2011-04-06 23:59:59Z jkim $
 */
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

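/*
 * Memory barriers.  A LOCKed instruction acts as a full fence on x86,
 * so all three barriers are implemented as a locked no-op add to the
 * top of the stack; unlike MFENCE/LFENCE/SFENCE this works on every
 * i386-class CPU.
 */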
#define mb()    __asm __volatile("lock; addl $0,(%%esp)" : : : "memory")
#define wmb()   __asm __volatile("lock; addl $0,(%%esp)" : : : "memory")
#define rmb()   __asm __volatile("lock; addl $0,(%%esp)" : : : "memory")

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)        (*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)      (*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)        (*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)   (*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)       (*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)     (*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)       (*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)  (*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)         (*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)       (*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)         (*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)    (*(u_int *)(P) -= (V))
 * atomic_readandclear_int(P)   (return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)        (*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)      (*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)        (*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)   (*(u_long *)(P) -= (V))
 * atomic_readandclear_long(P)  (return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

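/*
 * Illustrative sketch of a caller (hypothetical names, not part of this
 * header): a shared event counter bumped from several CPUs and drained
 * atomically by a consumer.
 */
#if 0
static volatile u_int event_count;

static void
event_record(void)
{
        atomic_add_int(&event_count, 1);
}

static u_int
event_drain(void)
{
        /* Fetch the accumulated count and reset it to zero atomically. */
        return (atomic_readandclear_int(&event_count));
}
#endif
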
/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)                     \
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);  \
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int     atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int   atomic_fetchadd_int(volatile u_int *p, u_int v);

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)                       \
u_##TYPE        atomic_load_acq_##TYPE(volatile u_##TYPE *p);   \
void            atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED        "lock ; "
#else
#define MPLOCKED
#endif

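/*
 * Thus in a UP kernel MPLOCKED expands to nothing and, e.g.,
 * atomic_add_int() compiles to a plain "addl"; in an SMP kernel or in
 * userland it prefixes "lock" to make the read-modify-write atomic
 * across processors.
 */
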
/*
 * The assembly is marked volatile so the compiler cannot delete it as
 * dead code or hoist it.  GCC also aggressively reorders memory
 * operations, so the barrier variants declare a "memory" clobber to
 * keep loads and stores from migrating across them.
 */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)             \
static __inline void                                    \
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(MPLOCKED OP                    \
        : "=m" (*p)                                     \
        : CONS (V), "m" (*p)                            \
        : "cc");                                        \
}                                                       \
                                                        \
static __inline void                                    \
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(MPLOCKED OP                    \
        : "=m" (*p)                                     \
        : CONS (V), "m" (*p)                            \
        : "memory", "cc");                              \
}                                                       \
struct __hack

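/*
 * For exposition, on an SMP kernel the invocation
 * ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) (see below) expands to
 * roughly:
 */
#if 0
static __inline void
atomic_add_int(volatile u_int *p, u_int v)
{
        __asm __volatile("lock ; addl %1,%0"
        : "=m" (*p)
        : "ir" (v), "m" (*p)
        : "cc");
}
/* atomic_add_barr_int() is identical but also clobbers "memory". */
#endif
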
#if defined(_KERNEL) && !defined(WANT_FUNCTIONS)

/*
 * The i486 lacks the CMPXCHG8B instruction (and FreeBSD does not
 * support SMP on it), so the _i386 variants below make 64-bit loads
 * and stores atomic by briefly disabling interrupts around a pair of
 * 32-bit moves.
 */
static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
        volatile uint32_t *high, *low;
        uint64_t res;

        low = (volatile uint32_t *)p;
        high = (volatile uint32_t *)p + 1;
        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       movl %1,%%eax ;         "
        "       movl %2,%%edx ;         "
        "       popfl"
        : "=&A" (res)                   /* 0 */
        : "m" (*low),                   /* 1 */
          "m" (*high)                   /* 2 */
        : "memory");

        return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
        volatile uint32_t *high, *low;

        low = (volatile uint32_t *)p;
        high = (volatile uint32_t *)p + 1;
        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       movl %%eax,%0 ;         "
        "       movl %%edx,%1 ;         "
        "       popfl"
        : "=m" (*low),                  /* 0 */
          "=m" (*high)                  /* 1 */
        : "A" (v)                       /* 2 */
        : "memory");
}

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
        uint64_t res;

        __asm __volatile(
        "       movl %%ebx,%%eax ;      "
        "       movl %%ecx,%%edx ;      "
        "       " MPLOCKED "            "
        "       cmpxchg8b %2"
        : "=&A" (res),                  /* 0 */
          "=m" (*p)                     /* 1 */
        : "m" (*p)                      /* 2 */
        : "memory", "cc");

        return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

        __asm __volatile(
        "       movl %%eax,%%ebx ;      "
        "       movl %%edx,%%ecx ;      "
        "1:                             "
        "       " MPLOCKED "            "
        "       cmpxchg8b %2 ;          "
        "       jne 1b"
        : "=m" (*p),                    /* 0 */
          "+A" (v)                      /* 1 */
        : "m" (*p)                      /* 2 */
        : "ebx", "ecx", "memory", "cc");
}
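
/*
 * Note on the i586 store above: on failure CMPXCHG8B loads the current
 * value of *p into EDX:EAX, so branching back simply retries with the
 * freshly observed old value until the exchange succeeds.
 */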

#endif /* _KERNEL && !WANT_FUNCTIONS */

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * If *dst == expect, atomically set *dst = src (operates on 32-bit
 * words).
 *
 * Returns 0 on failure, non-zero on success.
 */

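/*
 * Illustrative sketch (hypothetical names; real kernel code should use
 * the mutex(9) primitives instead): a minimal test-and-set spinlock
 * built from compare-and-set with acquire/release ordering.
 */
#if 0
static volatile u_int my_lock;          /* 0 == free, 1 == held */

static void
my_lock_acquire(void)
{
        while (atomic_cmpset_acq_int(&my_lock, 0, 1) == 0)
                ;                       /* spin until 0 -> 1 succeeds */
}

static void
my_lock_release(void)
{
        atomic_store_rel_int(&my_lock, 0);
}
#endif
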
#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
        u_char res;

        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       cmpl    %3,%4 ;         "
        "       jne     1f ;            "
        "       movl    %2,%1 ;         "
        "1:                             "
        "       sete    %0 ;            "
        "       popfl ;                 "
        "# atomic_cmpset_int"
        : "=q" (res),                   /* 0 */
          "=m" (*dst)                   /* 1 */
        : "r" (src),                    /* 2 */
          "r" (expect),                 /* 3 */
          "m" (*dst)                    /* 4 */
        : "memory");

        return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       cmpxchgl %2,%1 ;        "
        "       sete    %0 ;            "
        "1:                             "
        "# atomic_cmpset_int"
        : "=a" (res),                   /* 0 */
          "=m" (*dst)                   /* 1 */
        : "r" (src),                    /* 2 */
          "a" (expect),                 /* 3 */
          "m" (*dst)                    /* 4 */
        : "memory", "cc");

        return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

        __asm __volatile(
        "       " MPLOCKED "            "
        "       xaddl   %0, %1 ;        "
        "# atomic_fetchadd_int"
        : "+r" (v),                     /* 0 (result) */
          "=m" (*p)                     /* 1 */
        : "m" (*p)                      /* 2 */
        : "cc");
        return (v);
}

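/*
 * Illustrative sketch (hypothetical names): because each caller receives
 * a distinct previous value, atomic_fetchadd_int() is the natural
 * building block for a ticket dispenser or a unique-ID allocator.
 */
#if 0
static volatile u_int next_ticket;

static u_int
take_ticket(void)
{
        return (atomic_fetchadd_int(&next_ticket, 1));
}
#endif
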
#if defined(_KERNEL) && !defined(SMP)

/*
 * We assume that a = b will do atomic loads and stores.  However, on a
 * PentiumPro or higher, reads may pass writes, so for that case we have
 * to use a serializing instruction (i.e. with LOCK) to do the load in
 * SMP kernels.  For UP kernels, however, the cache of the single
 * processor is always consistent, so we only need to take care of
 * compiler reordering.
 */
#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)               \
static __inline u_##TYPE                                \
atomic_load_acq_##TYPE(volatile u_##TYPE *p)            \
{                                                       \
        u_##TYPE tmp;                                   \
                                                        \
        tmp = *p;                                       \
        __asm __volatile("" : : : "memory");            \
        return (tmp);                                   \
}                                                       \
                                                        \
static __inline void                                    \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile("" : : : "memory");            \
        *p = v;                                         \
}                                                       \
struct __hack

#else /* !(_KERNEL && !SMP) */

#define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)               \
static __inline u_##TYPE                                \
atomic_load_acq_##TYPE(volatile u_##TYPE *p)            \
{                                                       \
        u_##TYPE res;                                   \
                                                        \
        __asm __volatile(MPLOCKED LOP                   \
        : "=a" (res),                   /* 0 */         \
          "=m" (*p)                     /* 1 */         \
        : "m" (*p)                      /* 2 */         \
        : "memory", "cc");                              \
                                                        \
        return (res);                                   \
}                                                       \
                                                        \
/*                                                      \
 * The XCHG instruction asserts LOCK automagically.     \
 */                                                     \
static __inline void                                    \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(SOP                            \
        : "=m" (*p),                    /* 0 */         \
          "+r" (v)                      /* 1 */         \
        : "m" (*p)                      /* 2 */         \
        : "memory");                                    \
}                                                       \
struct __hack

#endif /* _KERNEL && !SMP */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

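/*
 * Instantiate the operations (inlines or prototypes, per the selection
 * above) for each width.  In the operand templates, the %b1 and %w1
 * modifiers force the byte- and word-sized register names for operand 1.
 */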
ATOMIC_ASM(set,      char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,      char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,      short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,      short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,      int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,      int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,      long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,      long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_STORE_LOAD(char, "cmpxchgb %b0,%1", "xchgb %b1,%0");
ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
ATOMIC_STORE_LOAD(int,  "cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long, "cmpxchgl %0,%1",  "xchgl %1,%0");

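/*
 * Note on the SMP acquire loads generated above: "lock cmpxchg" is used
 * purely for its locked-read effect.  Whatever happens to be in the
 * accumulator is compared with *p; on a mismatch the accumulator is
 * loaded from *p, and on a match the same value is stored back, so
 * either way the result is the current value of *p with full ordering.
 */
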
#undef ATOMIC_ASM
#undef ATOMIC_STORE_LOAD

#ifndef WANT_FUNCTIONS

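/*
 * These function pointers are expected to be aimed at the _i386 or
 * _i586 implementations above during startup, according to whether the
 * CPU supports CMPXCHG8B (an assumption about the surrounding kernel,
 * not something established by this header).
 */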
#ifdef _KERNEL
extern uint64_t (*atomic_load_acq_64)(volatile uint64_t *);
extern void (*atomic_store_rel_64)(volatile uint64_t *, uint64_t);
#endif

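/*
 * On i386, u_long and u_int are both 32 bits wide, so the long variants
 * below simply cast through to the int implementations.
 */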
static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

        return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
            (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

        return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

/* Read the current value and store a zero in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_readandclear_int(volatile u_int *addr)
{
        u_int res;

        res = 0;
        __asm __volatile(
        "       xchgl   %1,%0 ;         "
        "# atomic_readandclear_int"
        : "+r" (res),                   /* 0 */
          "=m" (*addr)                  /* 1 */
        : "m" (*addr));

        return (res);
}

static __inline u_long
atomic_readandclear_long(volatile u_long *addr)
{
        u_long res;

        res = 0;
        __asm __volatile(
        "       xchgl   %1,%0 ;         "
        "# atomic_readandclear_long"
        : "+r" (res),                   /* 0 */
          "=m" (*addr)                  /* 1 */
        : "m" (*addr));

        return (res);
}

#else /* !__GNUCLIKE_ASM */

u_int   atomic_readandclear_int(volatile u_int *addr);
u_long  atomic_readandclear_long(volatile u_long *addr);

#endif /* __GNUCLIKE_ASM */

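/*
 * On x86 every LOCKed read-modify-write instruction is a full barrier,
 * so the acquire and release variants of each operation all map to the
 * same _barr_ implementation defined above.
 */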
#define atomic_set_acq_char             atomic_set_barr_char
#define atomic_set_rel_char             atomic_set_barr_char
#define atomic_clear_acq_char           atomic_clear_barr_char
#define atomic_clear_rel_char           atomic_clear_barr_char
#define atomic_add_acq_char             atomic_add_barr_char
#define atomic_add_rel_char             atomic_add_barr_char
#define atomic_subtract_acq_char        atomic_subtract_barr_char
#define atomic_subtract_rel_char        atomic_subtract_barr_char

#define atomic_set_acq_short            atomic_set_barr_short
#define atomic_set_rel_short            atomic_set_barr_short
#define atomic_clear_acq_short          atomic_clear_barr_short
#define atomic_clear_rel_short          atomic_clear_barr_short
#define atomic_add_acq_short            atomic_add_barr_short
#define atomic_add_rel_short            atomic_add_barr_short
#define atomic_subtract_acq_short       atomic_subtract_barr_short
#define atomic_subtract_rel_short       atomic_subtract_barr_short

#define atomic_set_acq_int              atomic_set_barr_int
#define atomic_set_rel_int              atomic_set_barr_int
#define atomic_clear_acq_int            atomic_clear_barr_int
#define atomic_clear_rel_int            atomic_clear_barr_int
#define atomic_add_acq_int              atomic_add_barr_int
#define atomic_add_rel_int              atomic_add_barr_int
#define atomic_subtract_acq_int         atomic_subtract_barr_int
#define atomic_subtract_rel_int         atomic_subtract_barr_int
#define atomic_cmpset_acq_int           atomic_cmpset_int
#define atomic_cmpset_rel_int           atomic_cmpset_int

#define atomic_set_acq_long             atomic_set_barr_long
#define atomic_set_rel_long             atomic_set_barr_long
#define atomic_clear_acq_long           atomic_clear_barr_long
#define atomic_clear_rel_long           atomic_clear_barr_long
#define atomic_add_acq_long             atomic_add_barr_long
#define atomic_add_rel_long             atomic_add_barr_long
#define atomic_subtract_acq_long        atomic_subtract_barr_long
#define atomic_subtract_rel_long        atomic_subtract_barr_long
#define atomic_cmpset_acq_long          atomic_cmpset_long
#define atomic_cmpset_rel_long          atomic_cmpset_long

/* Operations on 8-bit bytes. */
#define atomic_set_8            atomic_set_char
#define atomic_set_acq_8        atomic_set_acq_char
#define atomic_set_rel_8        atomic_set_rel_char
#define atomic_clear_8          atomic_clear_char
#define atomic_clear_acq_8      atomic_clear_acq_char
#define atomic_clear_rel_8      atomic_clear_rel_char
#define atomic_add_8            atomic_add_char
#define atomic_add_acq_8        atomic_add_acq_char
#define atomic_add_rel_8        atomic_add_rel_char
#define atomic_subtract_8       atomic_subtract_char
#define atomic_subtract_acq_8   atomic_subtract_acq_char
#define atomic_subtract_rel_8   atomic_subtract_rel_char
#define atomic_load_acq_8       atomic_load_acq_char
#define atomic_store_rel_8      atomic_store_rel_char

/* Operations on 16-bit words. */
#define atomic_set_16           atomic_set_short
#define atomic_set_acq_16       atomic_set_acq_short
#define atomic_set_rel_16       atomic_set_rel_short
#define atomic_clear_16         atomic_clear_short
#define atomic_clear_acq_16     atomic_clear_acq_short
#define atomic_clear_rel_16     atomic_clear_rel_short
#define atomic_add_16           atomic_add_short
#define atomic_add_acq_16       atomic_add_acq_short
#define atomic_add_rel_16       atomic_add_rel_short
#define atomic_subtract_16      atomic_subtract_short
#define atomic_subtract_acq_16  atomic_subtract_acq_short
#define atomic_subtract_rel_16  atomic_subtract_rel_short
#define atomic_load_acq_16      atomic_load_acq_short
#define atomic_store_rel_16     atomic_store_rel_short

/* Operations on 32-bit double words. */
#define atomic_set_32           atomic_set_int
#define atomic_set_acq_32       atomic_set_acq_int
#define atomic_set_rel_32       atomic_set_rel_int
#define atomic_clear_32         atomic_clear_int
#define atomic_clear_acq_32     atomic_clear_acq_int
#define atomic_clear_rel_32     atomic_clear_rel_int
#define atomic_add_32           atomic_add_int
#define atomic_add_acq_32       atomic_add_acq_int
#define atomic_add_rel_32       atomic_add_rel_int
#define atomic_subtract_32      atomic_subtract_int
#define atomic_subtract_acq_32  atomic_subtract_acq_int
#define atomic_subtract_rel_32  atomic_subtract_rel_int
#define atomic_load_acq_32      atomic_load_acq_int
#define atomic_store_rel_32     atomic_store_rel_int
#define atomic_cmpset_32        atomic_cmpset_int
#define atomic_cmpset_acq_32    atomic_cmpset_acq_int
#define atomic_cmpset_rel_32    atomic_cmpset_rel_int
#define atomic_readandclear_32  atomic_readandclear_int
#define atomic_fetchadd_32      atomic_fetchadd_int

/* Operations on pointers. */
#define atomic_set_ptr(p, v) \
        atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define atomic_set_acq_ptr(p, v) \
        atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_set_rel_ptr(p, v) \
        atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_ptr(p, v) \
        atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_acq_ptr(p, v) \
        atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_rel_ptr(p, v) \
        atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_ptr(p, v) \
        atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_acq_ptr(p, v) \
        atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_rel_ptr(p, v) \
        atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_ptr(p, v) \
        atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_acq_ptr(p, v) \
        atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_rel_ptr(p, v) \
        atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_load_acq_ptr(p) \
        atomic_load_acq_int((volatile u_int *)(p))
#define atomic_store_rel_ptr(p, v) \
        atomic_store_rel_int((volatile u_int *)(p), (v))
#define atomic_cmpset_ptr(dst, old, new) \
        atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define atomic_cmpset_acq_ptr(dst, old, new) \
        atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
            (u_int)(new))
#define atomic_cmpset_rel_ptr(dst, old, new) \
        atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
            (u_int)(new))
#define atomic_readandclear_ptr(p) \
        atomic_readandclear_int((volatile u_int *)(p))

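/*
 * Illustrative sketch (hypothetical names; ignores the ABA problem for
 * brevity): lock-free push onto a singly-linked list using the
 * pointer-sized compare-and-set above.
 */
#if 0
struct node {
        struct node *next;
};

static struct node *volatile list_head;

static void
list_push(struct node *n)
{
        struct node *old;

        do {
                old = list_head;
                n->next = old;
        } while (atomic_cmpset_ptr(&list_head, old, n) == 0);
}
#endif
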
#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */
