sys/i386/include/atomic.h
/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/10.3/sys/i386/include/atomic.h 254620 2013-08-21 22:30:11Z jkim $
 */
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

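/*
 * Memory barriers.  A locked add of zero to the top of the stack acts
 * as a full fence; it is used for all three barriers here because it
 * is typically cheaper than MFENCE and also works on pre-SSE2 CPUs.
 */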
#define mb()    __asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define wmb()   __asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define rmb()   __asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)        (*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)      (*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)        (*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)   (*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)       (*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)     (*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)       (*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)  (*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)         (*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)       (*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)         (*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)    (*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)        (return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)   (return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)        (*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)      (*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)        (*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)   (*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)       (return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)  (return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

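/*
 * Usage sketch (illustrative only, not part of this header): the
 * operations above let several CPUs update a shared flag word without
 * a lock.  The names below are hypothetical.
 */
#if 0
static volatile u_int   pending_events;         /* shared flag word */

static void
note_event(u_int which)
{

        atomic_set_int(&pending_events, 1u << which);   /* atomic OR */
}

static void
ack_event(u_int which)
{

        atomic_clear_int(&pending_events, 1u << which); /* atomic AND-NOT */
}
#endif
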
/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)                     \
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);  \
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int     atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int   atomic_fetchadd_int(volatile u_int *p, u_int v);
int     atomic_testandset_int(volatile u_int *p, u_int v);

#define ATOMIC_LOAD(TYPE, LOP)                                  \
u_##TYPE        atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define ATOMIC_STORE(TYPE)                                      \
void            atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int             atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t        atomic_load_acq_64(volatile uint64_t *);
void            atomic_store_rel_64(volatile uint64_t *, uint64_t);
uint64_t        atomic_swap_64(volatile uint64_t *, uint64_t);

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED        "lock ; "
#else
#define MPLOCKED
#endif

/*
 * The assembly is marked volatile to keep the compiler from removing
 * it as dead code.  GCC aggressively reorders operations, so the
 * barrier variants also declare a "memory" clobber to keep memory
 * accesses from being moved across them.
 */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)             \
static __inline void                                    \
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(MPLOCKED OP                    \
        : "+m" (*p)                                     \
        : CONS (V)                                      \
        : "cc");                                        \
}                                                       \
                                                        \
static __inline void                                    \
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(MPLOCKED OP                    \
        : "+m" (*p)                                     \
        : CONS (V)                                      \
        : "memory", "cc");                              \
}                                                       \
struct __hack

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * If *dst equals expect, atomically store src in *dst (operands are
 * 32-bit words).
 *
 * Returns 0 on failure, non-zero on success.
 */

#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
        u_char res;

        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       cmpl    %3,%1 ;         "
        "       jne     1f ;            "
        "       movl    %2,%1 ;         "
        "1:                             "
        "       sete    %0 ;            "
        "       popfl ;                 "
        "# atomic_cmpset_int"
        : "=q" (res),                   /* 0 */
          "+m" (*dst)                   /* 1 */
        : "r" (src),                    /* 2 */
          "r" (expect)                  /* 3 */
        : "memory");
        return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       cmpxchgl %3,%1 ;        "
        "       sete    %0 ;            "
        "# atomic_cmpset_int"
        : "=q" (res),                   /* 0 */
          "+m" (*dst),                  /* 1 */
          "+a" (expect)                 /* 2 */
        : "r" (src)                     /* 3 */
        : "memory", "cc");
        return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */

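/*
 * Usage sketch (illustrative only): atomic_cmpset_int() is the
 * building block for locks.  A minimal test-and-set spin lock might
 * look like this; the names are hypothetical, and a real kernel mutex
 * also handles contention, priorities, and sleeping.
 */
#if 0
static volatile u_int   lock_word;      /* 0 == free, 1 == held */

static void
toy_lock(void)
{

        while (atomic_cmpset_acq_int(&lock_word, 0, 1) == 0)
                ;                       /* spin until 0 -> 1 succeeds */
}

static void
toy_unlock(void)
{

        atomic_store_rel_int(&lock_word, 0);    /* release the lock */
}
#endif
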
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

        __asm __volatile(
        "       " MPLOCKED "            "
        "       xaddl   %0,%1 ;         "
        "# atomic_fetchadd_int"
        : "+r" (v),                     /* 0 */
          "+m" (*p)                     /* 1 */
        : : "cc");
        return (v);
}

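/*
 * Usage sketch (illustrative only): fetchadd returns the old value, so
 * it can hand out unique, monotonically increasing IDs without a lock.
 * The names are hypothetical.
 */
#if 0
static volatile u_int   next_id;

static u_int
alloc_id(void)
{

        return (atomic_fetchadd_int(&next_id, 1));      /* old value */
}
#endif
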
static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       btsl    %2,%1 ;         "
        "       setc    %0 ;            "
        "# atomic_testandset_int"
        : "=q" (res),                   /* 0 */
          "+m" (*p)                     /* 1 */
        : "Ir" (v & 0x1f)               /* 2 */
        : "cc");
        return (res);
}

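/*
 * Usage sketch (illustrative only): testandset returns the previous
 * state of the bit, so it can atomically claim a slot in a 32-slot
 * bitmap.  The names are hypothetical.
 */
#if 0
static volatile u_int   slot_map;       /* bit i set == slot i in use */

static int
claim_slot(u_int i)
{

        /* Non-zero return: the bit was clear and the slot is now ours. */
        return (atomic_testandset_int(&slot_map, i) == 0);
}
#endif
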
/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may be reordered ahead of an earlier store, so
 * atomic_load_acq needs a Store/Load barrier in SMP kernels.  We use
 * "lock cmpxchg" as recommended by the AMD Software Optimization
 * Guide, and not mfence.  For UP kernels, however, the cache of the
 * single processor is always consistent, so we only need to take care
 * of the compiler.
 */
#define ATOMIC_STORE(TYPE)                              \
static __inline void                                    \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __compiler_membar();                            \
        *p = v;                                         \
}                                                       \
struct __hack

#if defined(_KERNEL) && !defined(SMP)

#define ATOMIC_LOAD(TYPE, LOP)                          \
static __inline u_##TYPE                                \
atomic_load_acq_##TYPE(volatile u_##TYPE *p)            \
{                                                       \
        u_##TYPE tmp;                                   \
                                                        \
        tmp = *p;                                       \
        __compiler_membar();                            \
        return (tmp);                                   \
}                                                       \
struct __hack

#else /* !(_KERNEL && !SMP) */

#define ATOMIC_LOAD(TYPE, LOP)                          \
static __inline u_##TYPE                                \
atomic_load_acq_##TYPE(volatile u_##TYPE *p)            \
{                                                       \
        u_##TYPE res;                                   \
                                                        \
        __asm __volatile(MPLOCKED LOP                   \
        : "=a" (res),                   /* 0 */         \
          "+m" (*p)                     /* 1 */         \
        : : "memory", "cc");                            \
        return (res);                                   \
}                                                       \
struct __hack

#endif /* _KERNEL && !SMP */

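/*
 * Usage sketch (illustrative only): release/acquire pairs publish data
 * without a lock.  A consumer that observes "ready" set is guaranteed
 * to see the producer's earlier writes.  The names are hypothetical.
 */
#if 0
static u_int            payload;
static volatile u_int   ready;

static void
produce(u_int v)
{

        payload = v;                            /* plain write ... */
        atomic_store_rel_int(&ready, 1);        /* ... published by release */
}

static int
consume(u_int *vp)
{

        if (atomic_load_acq_int(&ready) == 0)   /* acquire pairs with release */
                return (0);
        *vp = payload;                          /* safe to read now */
        return (1);
}
#endif
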
#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int             atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int             atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t        atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t        atomic_load_acq_64_i586(volatile uint64_t *);
void            atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void            atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t        atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t        atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif

/*
 * The 80486 does not support SMP or CMPXCHG8B, so the _i386 variants
 * below make 64-bit operations atomic by disabling interrupts around
 * a pair of 32-bit accesses (pushfl/cli ... popfl).
 */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
        volatile uint32_t *p;
        u_char res;

        p = (volatile uint32_t *)dst;
        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       xorl    %1,%%eax ;      "
        "       xorl    %2,%%edx ;      "
        "       orl     %%edx,%%eax ;   "
        "       jne     1f ;            "
        "       movl    %4,%1 ;         "
        "       movl    %5,%2 ;         "
        "1:                             "
        "       sete    %3 ;            "
        "       popfl"
        : "+A" (expect),                /* 0 */
          "+m" (*p),                    /* 1 */
          "+m" (*(p + 1)),              /* 2 */
          "=q" (res)                    /* 3 */
        : "r" ((uint32_t)src),          /* 4 */
          "r" ((uint32_t)(src >> 32))   /* 5 */
        : "memory", "cc");
        return (res);
}

static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
        volatile uint32_t *q;
        uint64_t res;

        q = (volatile uint32_t *)p;
        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       movl    %1,%%eax ;      "
        "       movl    %2,%%edx ;      "
        "       popfl"
        : "=&A" (res)                   /* 0 */
        : "m" (*q),                     /* 1 */
          "m" (*(q + 1))                /* 2 */
        : "memory");
        return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
        volatile uint32_t *q;

        q = (volatile uint32_t *)p;
        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       movl    %%eax,%0 ;      "
        "       movl    %%edx,%1 ;      "
        "       popfl"
        : "=m" (*q),                    /* 0 */
          "=m" (*(q + 1))               /* 1 */
        : "A" (v)                       /* 2 */
        : "memory");
}

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
        volatile uint32_t *q;
        uint64_t res;

        q = (volatile uint32_t *)p;
        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       movl    %1,%%eax ;      "
        "       movl    %2,%%edx ;      "
        "       movl    %4,%2 ;         "
        "       movl    %3,%1 ;         "
        "       popfl"
        : "=&A" (res),                  /* 0 */
          "+m" (*q),                    /* 1 */
          "+m" (*(q + 1))               /* 2 */
        : "r" ((uint32_t)v),            /* 3 */
          "r" ((uint32_t)(v >> 32)));   /* 4 */
        return (res);
}

static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       cmpxchg8b %1 ;          "
        "       sete    %0"
        : "=q" (res),                   /* 0 */
          "+m" (*dst),                  /* 1 */
          "+A" (expect)                 /* 2 */
        : "b" ((uint32_t)src),          /* 3 */
          "c" ((uint32_t)(src >> 32))   /* 4 */
        : "memory", "cc");
        return (res);
}

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
        uint64_t res;

        __asm __volatile(
        "       movl    %%ebx,%%eax ;   "
        "       movl    %%ecx,%%edx ;   "
        "       " MPLOCKED "            "
        "       cmpxchg8b %1"
        : "=&A" (res),                  /* 0 */
          "+m" (*p)                     /* 1 */
        : : "memory", "cc");
        return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

        __asm __volatile(
        "       movl    %%eax,%%ebx ;   "
        "       movl    %%edx,%%ecx ;   "
        "1:                             "
        "       " MPLOCKED "            "
        "       cmpxchg8b %0 ;          "
        "       jne     1b"
        : "+m" (*p),                    /* 0 */
          "+A" (v)                      /* 1 */
        : : "ebx", "ecx", "memory", "cc");
}

static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

        __asm __volatile(
        "       movl    %%eax,%%ebx ;   "
        "       movl    %%edx,%%ecx ;   "
        "1:                             "
        "       " MPLOCKED "            "
        "       cmpxchg8b %0 ;          "
        "       jne     1b"
        : "+m" (*p),                    /* 0 */
          "+A" (v)                      /* 1 */
        : : "ebx", "ecx", "memory", "cc");
        return (v);
}

static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

        if ((cpu_feature & CPUID_CX8) == 0)
                return (atomic_cmpset_64_i386(dst, expect, src));
        else
                return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{

        if ((cpu_feature & CPUID_CX8) == 0)
                return (atomic_load_acq_64_i386(p));
        else
                return (atomic_load_acq_64_i586(p));
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{

        if ((cpu_feature & CPUID_CX8) == 0)
                atomic_store_rel_64_i386(p, v);
        else
                atomic_store_rel_64_i586(p, v);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

        if ((cpu_feature & CPUID_CX8) == 0)
                return (atomic_swap_64_i386(p, v));
        else
                return (atomic_swap_64_i586(p, v));
}

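/*
 * Usage sketch (illustrative only): with no 64-bit fetchadd on i386, a
 * 64-bit counter can be advanced with a compare-and-set loop; the CX8
 * dispatch above makes this work on both 486- and 586-class CPUs.  The
 * names are hypothetical.
 */
#if 0
static volatile uint64_t        byte_count;

static void
add_bytes(uint64_t n)
{
        uint64_t old;

        do {
                old = atomic_load_acq_64(&byte_count);
        } while (atomic_cmpset_64(&byte_count, old, old + n) == 0);
}
#endif
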
#endif /* _KERNEL */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,      char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,      char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,      short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,      short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,      int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,      int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,      long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,      long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_LOAD(char,  "cmpxchgb %b0,%1");
ATOMIC_LOAD(short, "cmpxchgw %w0,%1");
ATOMIC_LOAD(int,   "cmpxchgl %0,%1");
ATOMIC_LOAD(long,  "cmpxchgl %0,%1");

ATOMIC_STORE(char);
ATOMIC_STORE(short);
ATOMIC_STORE(int);
ATOMIC_STORE(long);

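/*
 * For reference, ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) expands
 * (in the inline-asm branch, with MPLOCKED defined) to roughly:
 *
 *      static __inline void
 *      atomic_add_int(volatile u_int *p, u_int v)
 *      {
 *              __asm __volatile("lock ; addl %1,%0"
 *              : "+m" (*p) : "ir" (v) : "cc");
 *      }
 *
 * plus an atomic_add_barr_int variant that also clobbers "memory".
 */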
#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE

#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

        return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
            (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

        return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

        return (atomic_testandset_int((volatile u_int *)p, v));
}

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

        /* xchg with a memory operand is implicitly locked. */
        __asm __volatile(
        "       xchgl   %1,%0 ;         "
        "# atomic_swap_int"
        : "+r" (v),                     /* 0 */
          "+m" (*p));                   /* 1 */
        return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

        return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int   atomic_swap_int(volatile u_int *p, u_int v);
u_long  atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define atomic_set_acq_char             atomic_set_barr_char
#define atomic_set_rel_char             atomic_set_barr_char
#define atomic_clear_acq_char           atomic_clear_barr_char
#define atomic_clear_rel_char           atomic_clear_barr_char
#define atomic_add_acq_char             atomic_add_barr_char
#define atomic_add_rel_char             atomic_add_barr_char
#define atomic_subtract_acq_char        atomic_subtract_barr_char
#define atomic_subtract_rel_char        atomic_subtract_barr_char

#define atomic_set_acq_short            atomic_set_barr_short
#define atomic_set_rel_short            atomic_set_barr_short
#define atomic_clear_acq_short          atomic_clear_barr_short
#define atomic_clear_rel_short          atomic_clear_barr_short
#define atomic_add_acq_short            atomic_add_barr_short
#define atomic_add_rel_short            atomic_add_barr_short
#define atomic_subtract_acq_short       atomic_subtract_barr_short
#define atomic_subtract_rel_short       atomic_subtract_barr_short

#define atomic_set_acq_int              atomic_set_barr_int
#define atomic_set_rel_int              atomic_set_barr_int
#define atomic_clear_acq_int            atomic_clear_barr_int
#define atomic_clear_rel_int            atomic_clear_barr_int
#define atomic_add_acq_int              atomic_add_barr_int
#define atomic_add_rel_int              atomic_add_barr_int
#define atomic_subtract_acq_int         atomic_subtract_barr_int
#define atomic_subtract_rel_int         atomic_subtract_barr_int
#define atomic_cmpset_acq_int           atomic_cmpset_int
#define atomic_cmpset_rel_int           atomic_cmpset_int

#define atomic_set_acq_long             atomic_set_barr_long
#define atomic_set_rel_long             atomic_set_barr_long
#define atomic_clear_acq_long           atomic_clear_barr_long
#define atomic_clear_rel_long           atomic_clear_barr_long
#define atomic_add_acq_long             atomic_add_barr_long
#define atomic_add_rel_long             atomic_add_barr_long
#define atomic_subtract_acq_long        atomic_subtract_barr_long
#define atomic_subtract_rel_long        atomic_subtract_barr_long
#define atomic_cmpset_acq_long          atomic_cmpset_long
#define atomic_cmpset_rel_long          atomic_cmpset_long

#define atomic_readandclear_int(p)      atomic_swap_int(p, 0)
#define atomic_readandclear_long(p)     atomic_swap_long(p, 0)

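/*
 * Usage sketch (illustrative only): readandclear drains a flag word in
 * one step, so events posted concurrently are either seen now or left
 * for the next pass.  The names are hypothetical.
 */
#if 0
static volatile u_int   pending;

static void
drain_events(void)
{
        u_int bit, ev;

        ev = atomic_readandclear_int(&pending); /* fetch and zero */
        while (ev != 0) {
                bit = ev & ~(ev - 1);           /* lowest set bit */
                /* handle_event(bit); */
                ev &= ~bit;
        }
}
#endif
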
/* Operations on 8-bit bytes. */
#define atomic_set_8            atomic_set_char
#define atomic_set_acq_8        atomic_set_acq_char
#define atomic_set_rel_8        atomic_set_rel_char
#define atomic_clear_8          atomic_clear_char
#define atomic_clear_acq_8      atomic_clear_acq_char
#define atomic_clear_rel_8      atomic_clear_rel_char
#define atomic_add_8            atomic_add_char
#define atomic_add_acq_8        atomic_add_acq_char
#define atomic_add_rel_8        atomic_add_rel_char
#define atomic_subtract_8       atomic_subtract_char
#define atomic_subtract_acq_8   atomic_subtract_acq_char
#define atomic_subtract_rel_8   atomic_subtract_rel_char
#define atomic_load_acq_8       atomic_load_acq_char
#define atomic_store_rel_8      atomic_store_rel_char

/* Operations on 16-bit words. */
#define atomic_set_16           atomic_set_short
#define atomic_set_acq_16       atomic_set_acq_short
#define atomic_set_rel_16       atomic_set_rel_short
#define atomic_clear_16         atomic_clear_short
#define atomic_clear_acq_16     atomic_clear_acq_short
#define atomic_clear_rel_16     atomic_clear_rel_short
#define atomic_add_16           atomic_add_short
#define atomic_add_acq_16       atomic_add_acq_short
#define atomic_add_rel_16       atomic_add_rel_short
#define atomic_subtract_16      atomic_subtract_short
#define atomic_subtract_acq_16  atomic_subtract_acq_short
#define atomic_subtract_rel_16  atomic_subtract_rel_short
#define atomic_load_acq_16      atomic_load_acq_short
#define atomic_store_rel_16     atomic_store_rel_short

/* Operations on 32-bit double words. */
#define atomic_set_32           atomic_set_int
#define atomic_set_acq_32       atomic_set_acq_int
#define atomic_set_rel_32       atomic_set_rel_int
#define atomic_clear_32         atomic_clear_int
#define atomic_clear_acq_32     atomic_clear_acq_int
#define atomic_clear_rel_32     atomic_clear_rel_int
#define atomic_add_32           atomic_add_int
#define atomic_add_acq_32       atomic_add_acq_int
#define atomic_add_rel_32       atomic_add_rel_int
#define atomic_subtract_32      atomic_subtract_int
#define atomic_subtract_acq_32  atomic_subtract_acq_int
#define atomic_subtract_rel_32  atomic_subtract_rel_int
#define atomic_load_acq_32      atomic_load_acq_int
#define atomic_store_rel_32     atomic_store_rel_int
#define atomic_cmpset_32        atomic_cmpset_int
#define atomic_cmpset_acq_32    atomic_cmpset_acq_int
#define atomic_cmpset_rel_32    atomic_cmpset_rel_int
#define atomic_swap_32          atomic_swap_int
#define atomic_readandclear_32  atomic_readandclear_int
#define atomic_fetchadd_32      atomic_fetchadd_int
#define atomic_testandset_32    atomic_testandset_int

/* Operations on pointers. */
#define atomic_set_ptr(p, v) \
        atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define atomic_set_acq_ptr(p, v) \
        atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_set_rel_ptr(p, v) \
        atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_ptr(p, v) \
        atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_acq_ptr(p, v) \
        atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_rel_ptr(p, v) \
        atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_ptr(p, v) \
        atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_acq_ptr(p, v) \
        atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_rel_ptr(p, v) \
        atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_ptr(p, v) \
        atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_acq_ptr(p, v) \
        atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_rel_ptr(p, v) \
        atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_load_acq_ptr(p) \
        atomic_load_acq_int((volatile u_int *)(p))
#define atomic_store_rel_ptr(p, v) \
        atomic_store_rel_int((volatile u_int *)(p), (v))
#define atomic_cmpset_ptr(dst, old, new) \
        atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define atomic_cmpset_acq_ptr(dst, old, new) \
        atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
            (u_int)(new))
#define atomic_cmpset_rel_ptr(dst, old, new) \
        atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
            (u_int)(new))
#define atomic_swap_ptr(p, v) \
        atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define atomic_readandclear_ptr(p) \
        atomic_readandclear_int((volatile u_int *)(p))

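/*
 * Usage sketch (illustrative only): atomic_cmpset_ptr() supports
 * lock-free linked structures such as a singly-linked free list.  The
 * types and names are hypothetical; push-only use like this avoids the
 * ABA problem, which a pop operation would have to consider.
 */
#if 0
struct item {
        struct item     *next;
};

static struct item * volatile   free_list;

static void
free_item(struct item *it)
{
        struct item *head;

        do {
                head = free_list;
                it->next = head;        /* link before publishing */
        } while (atomic_cmpset_ptr(&free_list, head, it) == 0);
}
#endif
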
#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */