FreeBSD/Linux Kernel Cross Reference
sys/i386/include/atomic.h

/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/10.4/sys/i386/include/atomic.h 302108 2016-06-23 02:21:37Z sephe $
 */
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

#define mb()    __asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define wmb()   __asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
#define rmb()   __asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc")
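/*
 * Editor's note (not part of the original header): these barriers are
 * implemented as a locked read-modify-write of the word at the top of the
 * stack rather than with the *FENCE instructions.  A locked instruction
 * acts as a full memory barrier on IA32, the stack word is almost always
 * cache-hot, and SFENCE/LFENCE/MFENCE are unavailable on the pre-SSE/SSE2
 * processors that this i386 header still has to support.
 */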

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)        (*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)      (*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)        (*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)   (*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)       (*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)     (*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)       (*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)  (*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)         (*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)       (*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)         (*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)    (*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)        (return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)   (return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)        (*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)      (*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)        (*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)   (*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)       (return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)  (return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */

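/*
 * Editor's example (illustrative sketch, not part of the original header):
 * typical use of the operations listed above on a 32-bit flags/counter
 * word.  The variable names, flag values, and function are hypothetical.
 */
#if 0
#define MYDEV_RUNNING   0x0001
#define MYDEV_SUSPENDED 0x0002

static volatile u_int sc_flags;
static volatile u_int sc_intr_count;

static void
mydev_intr_example(void)
{

        atomic_set_int(&sc_flags, MYDEV_RUNNING);       /* *p |= V, atomically */
        atomic_clear_int(&sc_flags, MYDEV_SUSPENDED);   /* *p &= ~V, atomically */
        atomic_add_int(&sc_intr_count, 1);              /* *p += V, atomically */
}
#endif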
/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if defined(KLD_MODULE) || !defined(__GNUCLIKE_ASM)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)                     \
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);  \
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int     atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
u_int   atomic_fetchadd_int(volatile u_int *p, u_int v);
int     atomic_testandset_int(volatile u_int *p, u_int v);
int     atomic_testandclear_int(volatile u_int *p, u_int v);

#define ATOMIC_LOAD(TYPE, LOP)                                  \
u_##TYPE        atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define ATOMIC_STORE(TYPE)                                      \
void            atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int             atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
uint64_t        atomic_load_acq_64(volatile uint64_t *);
void            atomic_store_rel_64(volatile uint64_t *, uint64_t);
uint64_t        atomic_swap_64(volatile uint64_t *, uint64_t);

#else /* !KLD_MODULE && __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL)
#define MPLOCKED        "lock ; "
#else
#define MPLOCKED
#endif

/*
 * The asm statements are marked volatile so that the compiler cannot
 * remove them.  GCC also reorders operations aggressively, so the
 * barrier variants need an explicit memory clobber to prevent that
 * reordering across them.
 */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)             \
static __inline void                                    \
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(MPLOCKED OP                    \
        : "+m" (*p)                                     \
        : CONS (V)                                      \
        : "cc");                                        \
}                                                       \
                                                        \
static __inline void                                    \
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(MPLOCKED OP                    \
        : "+m" (*p)                                     \
        : CONS (V)                                      \
        : "memory", "cc");                              \
}                                                       \
struct __hack

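/*
 * Editor's note (illustrative, not part of the original header): assuming
 * an SMP kernel, ATOMIC_ASM(add, int, "addl %1,%0", "ir", v) expands to
 * approximately the pair of functions below.  The "_barr_" variant differs
 * only in the "memory" clobber, which additionally makes it a compiler
 * barrier.
 */
#if 0
static __inline void
atomic_add_int(volatile u_int *p, u_int v)
{
        __asm __volatile("lock ; " "addl %1,%0"
        : "+m" (*p)
        : "ir" (v)
        : "cc");
}

static __inline void
atomic_add_barr_int(volatile u_int *p, u_int v)
{
        __asm __volatile("lock ; " "addl %1,%0"
        : "+m" (*p)
        : "ir" (v)
        : "memory", "cc");
}
#endif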
/*
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == expect) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
        u_char res;

        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       cmpl    %3,%1 ;         "
        "       jne     1f ;            "
        "       movl    %2,%1 ;         "
        "1:                             "
        "       sete    %0 ;            "
        "       popfl ;                 "
        "# atomic_cmpset_int"
        : "=q" (res),                   /* 0 */
          "+m" (*dst)                   /* 1 */
        : "r" (src),                    /* 2 */
          "r" (expect)                  /* 3 */
        : "memory");
        return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       cmpxchgl %3,%1 ;        "
        "       sete    %0 ;            "
        "# atomic_cmpset_int"
        : "=q" (res),                   /* 0 */
          "+m" (*dst),                  /* 1 */
          "+a" (expect)                 /* 2 */
        : "r" (src)                     /* 3 */
        : "memory", "cc");
        return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */

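/*
 * Editor's example (illustrative sketch, not part of the original header):
 * the usual compare-and-set retry loop built on atomic_cmpset_int(), here
 * implementing a saturating increment.  The helper name is hypothetical.
 */
#if 0
static __inline void
example_refcount_acquire(volatile u_int *countp)
{
        u_int old;

        do {
                old = *countp;
                if (old == 0xffffffffU)         /* saturated; do not wrap */
                        return;
        } while (atomic_cmpset_int(countp, old, old + 1) == 0);
}
#endif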
/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

        __asm __volatile(
        "       " MPLOCKED "            "
        "       xaddl   %0,%1 ;         "
        "# atomic_fetchadd_int"
        : "+r" (v),                     /* 0 */
          "+m" (*p)                     /* 1 */
        : : "cc");
        return (v);
}

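/*
 * Editor's example (illustrative, not part of the original header):
 * atomic_fetchadd_int() returns the pre-increment value, which makes it a
 * natural fit for handing out unique tickets or sequence numbers.  The
 * variable and function names are hypothetical.
 */
#if 0
static volatile u_int example_next_ticket;

static __inline u_int
example_alloc_ticket(void)
{

        return (atomic_fetchadd_int(&example_next_ticket, 1));
}
#endif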
static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       btsl    %2,%1 ;         "
        "       setc    %0 ;            "
        "# atomic_testandset_int"
        : "=q" (res),                   /* 0 */
          "+m" (*p)                     /* 1 */
        : "Ir" (v & 0x1f)               /* 2 */
        : "cc");
        return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       btrl    %2,%1 ;         "
        "       setc    %0 ;            "
        "# atomic_testandclear_int"
        : "=q" (res),                   /* 0 */
          "+m" (*p)                     /* 1 */
        : "Ir" (v & 0x1f)               /* 2 */
        : "cc");
        return (res);
}

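/*
 * Editor's example (illustrative, not part of the original header):
 * atomic_testandset_int() returns the previous state of the bit, so a
 * single call can both claim a flag bit and report whether it was already
 * claimed.  Names are hypothetical; bit numbers are taken modulo 32 by the
 * implementations above.
 */
#if 0
#define EXAMPLE_BUSY_BIT        0

static volatile u_int example_state;

static __inline int
example_try_claim(void)
{

        /* Non-zero iff the bit was previously clear and we set it. */
        return (atomic_testandset_int(&example_state, EXAMPLE_BUSY_BIT) == 0);
}

static __inline void
example_release(void)
{

        (void)atomic_testandclear_int(&example_state, EXAMPLE_BUSY_BIT);
}
#endif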
/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, loads may pass stores, so for atomic_load_acq we have to
 * ensure a Store/Load barrier to do the load in SMP kernels.  We use
 * "lock cmpxchg" as recommended by the AMD Software Optimization
 * Guide, and not mfence.  For UP kernels, however, the cache of the
 * single processor is always consistent, so we only need to take care
 * of the compiler.
 */
#define ATOMIC_STORE(TYPE)                              \
static __inline void                                    \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __compiler_membar();                            \
        *p = v;                                         \
}                                                       \
struct __hack

#if defined(_KERNEL) && !defined(SMP)

#define ATOMIC_LOAD(TYPE, LOP)                          \
static __inline u_##TYPE                                \
atomic_load_acq_##TYPE(volatile u_##TYPE *p)            \
{                                                       \
        u_##TYPE tmp;                                   \
                                                        \
        tmp = *p;                                       \
        __compiler_membar();                            \
        return (tmp);                                   \
}                                                       \
struct __hack

#else /* !(_KERNEL && !SMP) */

#define ATOMIC_LOAD(TYPE, LOP)                          \
static __inline u_##TYPE                                \
atomic_load_acq_##TYPE(volatile u_##TYPE *p)            \
{                                                       \
        u_##TYPE res;                                   \
                                                        \
        __asm __volatile(MPLOCKED LOP                   \
        : "=a" (res),                   /* 0 */         \
          "+m" (*p)                     /* 1 */         \
        : : "memory", "cc");                            \
        return (res);                                   \
}                                                       \
struct __hack

#endif /* _KERNEL && !SMP */

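/*
 * Editor's example (illustrative sketch, not part of the original header):
 * the load_acq/store_rel operations defined by these macros are what
 * hand-off patterns rely on.  The producer publishes data with a release
 * store, the consumer observes the flag with an acquire load, and the
 * ordering described in the comment above makes the payload visible before
 * the flag.  Names are hypothetical.
 */
#if 0
static u_int example_payload;
static volatile u_int example_ready;

static void
example_produce(u_int value)
{

        example_payload = value;
        atomic_store_rel_int(&example_ready, 1);        /* publish */
}

static int
example_consume(u_int *valuep)
{

        if (atomic_load_acq_int(&example_ready) == 0)
                return (0);
        *valuep = example_payload;      /* ordered after the acquire load */
        return (1);
}
#endif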
#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int             atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int             atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t        atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t        atomic_load_acq_64_i586(volatile uint64_t *);
void            atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void            atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t        atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t        atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif

/* I486 does not support SMP or CMPXCHG8B. */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
        volatile uint32_t *p;
        u_char res;

        p = (volatile uint32_t *)dst;
        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       xorl    %1,%%eax ;      "
        "       xorl    %2,%%edx ;      "
        "       orl     %%edx,%%eax ;   "
        "       jne     1f ;            "
        "       movl    %4,%1 ;         "
        "       movl    %5,%2 ;         "
        "1:                             "
        "       sete    %3 ;            "
        "       popfl"
        : "+A" (expect),                /* 0 */
          "+m" (*p),                    /* 1 */
          "+m" (*(p + 1)),              /* 2 */
          "=q" (res)                    /* 3 */
        : "r" ((uint32_t)src),          /* 4 */
          "r" ((uint32_t)(src >> 32))   /* 5 */
        : "memory", "cc");
        return (res);
}

static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
        volatile uint32_t *q;
        uint64_t res;

        q = (volatile uint32_t *)p;
        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       movl    %1,%%eax ;      "
        "       movl    %2,%%edx ;      "
        "       popfl"
        : "=&A" (res)                   /* 0 */
        : "m" (*q),                     /* 1 */
          "m" (*(q + 1))                /* 2 */
        : "memory");
        return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
        volatile uint32_t *q;

        q = (volatile uint32_t *)p;
        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       movl    %%eax,%0 ;      "
        "       movl    %%edx,%1 ;      "
        "       popfl"
        : "=m" (*q),                    /* 0 */
          "=m" (*(q + 1))               /* 1 */
        : "A" (v)                       /* 2 */
        : "memory");
}

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
        volatile uint32_t *q;
        uint64_t res;

        q = (volatile uint32_t *)p;
        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       movl    %1,%%eax ;      "
        "       movl    %2,%%edx ;      "
        "       movl    %4,%2 ;         "
        "       movl    %3,%1 ;         "
        "       popfl"
        : "=&A" (res),                  /* 0 */
          "+m" (*q),                    /* 1 */
          "+m" (*(q + 1))               /* 2 */
        : "r" ((uint32_t)v),            /* 3 */
          "r" ((uint32_t)(v >> 32)));   /* 4 */
        return (res);
}

static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       cmpxchg8b %1 ;          "
        "       sete    %0"
        : "=q" (res),                   /* 0 */
          "+m" (*dst),                  /* 1 */
          "+A" (expect)                 /* 2 */
        : "b" ((uint32_t)src),          /* 3 */
          "c" ((uint32_t)(src >> 32))   /* 4 */
        : "memory", "cc");
        return (res);
}

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
        uint64_t res;

        __asm __volatile(
        "       movl    %%ebx,%%eax ;   "
        "       movl    %%ecx,%%edx ;   "
        "       " MPLOCKED "            "
        "       cmpxchg8b %1"
        : "=&A" (res),                  /* 0 */
          "+m" (*p)                     /* 1 */
        : : "memory", "cc");
        return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

        __asm __volatile(
        "       movl    %%eax,%%ebx ;   "
        "       movl    %%edx,%%ecx ;   "
        "1:                             "
        "       " MPLOCKED "            "
        "       cmpxchg8b %0 ;          "
        "       jne     1b"
        : "+m" (*p),                    /* 0 */
          "+A" (v)                      /* 1 */
        : : "ebx", "ecx", "memory", "cc");
}

static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

        __asm __volatile(
        "       movl    %%eax,%%ebx ;   "
        "       movl    %%edx,%%ecx ;   "
        "1:                             "
        "       " MPLOCKED "            "
        "       cmpxchg8b %0 ;          "
        "       jne     1b"
        : "+m" (*p),                    /* 0 */
          "+A" (v)                      /* 1 */
        : : "ebx", "ecx", "memory", "cc");
        return (v);
}

static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

        if ((cpu_feature & CPUID_CX8) == 0)
                return (atomic_cmpset_64_i386(dst, expect, src));
        else
                return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{

        if ((cpu_feature & CPUID_CX8) == 0)
                return (atomic_load_acq_64_i386(p));
        else
                return (atomic_load_acq_64_i586(p));
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{

        if ((cpu_feature & CPUID_CX8) == 0)
                atomic_store_rel_64_i386(p, v);
        else
                atomic_store_rel_64_i586(p, v);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

        if ((cpu_feature & CPUID_CX8) == 0)
                return (atomic_swap_64_i386(p, v));
        else
                return (atomic_swap_64_i586(p, v));
}

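/*
 * Editor's example (illustrative, not part of the original header): the
 * wrappers above pick the i386 (pushfl/cli) or i586 (cmpxchg8b) variant at
 * run time based on CPUID_CX8, so callers simply use the generic 64-bit
 * names.  A compare-and-set loop over a 64-bit counter might look like the
 * sketch below; the names are hypothetical.
 */
#if 0
static volatile uint64_t example_bytes;

static __inline void
example_account_bytes(uint64_t n)
{
        uint64_t old;

        do {
                old = atomic_load_acq_64(&example_bytes);
        } while (atomic_cmpset_64(&example_bytes, old, old + n) == 0);
}
#endif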
#endif /* _KERNEL */

#endif /* KLD_MODULE || !__GNUCLIKE_ASM */

ATOMIC_ASM(set,      char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,      char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,      short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,      short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,      int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,      int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,      long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,      long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

ATOMIC_LOAD(char,  "cmpxchgb %b0,%1");
ATOMIC_LOAD(short, "cmpxchgw %w0,%1");
ATOMIC_LOAD(int,   "cmpxchgl %0,%1");
ATOMIC_LOAD(long,  "cmpxchgl %0,%1");

ATOMIC_STORE(char);
ATOMIC_STORE(short);
ATOMIC_STORE(int);
ATOMIC_STORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE

#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

        return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
            (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

        return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

        return (atomic_testandset_int((volatile u_int *)p, v));
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{

        return (atomic_testandclear_int((volatile u_int *)p, v));
}

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

        __asm __volatile(
        "       xchgl   %1,%0 ;         "
        "# atomic_swap_int"
        : "+r" (v),                     /* 0 */
          "+m" (*p));                   /* 1 */
        return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

        return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int   atomic_swap_int(volatile u_int *p, u_int v);
u_long  atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define atomic_set_acq_char             atomic_set_barr_char
#define atomic_set_rel_char             atomic_set_barr_char
#define atomic_clear_acq_char           atomic_clear_barr_char
#define atomic_clear_rel_char           atomic_clear_barr_char
#define atomic_add_acq_char             atomic_add_barr_char
#define atomic_add_rel_char             atomic_add_barr_char
#define atomic_subtract_acq_char        atomic_subtract_barr_char
#define atomic_subtract_rel_char        atomic_subtract_barr_char

#define atomic_set_acq_short            atomic_set_barr_short
#define atomic_set_rel_short            atomic_set_barr_short
#define atomic_clear_acq_short          atomic_clear_barr_short
#define atomic_clear_rel_short          atomic_clear_barr_short
#define atomic_add_acq_short            atomic_add_barr_short
#define atomic_add_rel_short            atomic_add_barr_short
#define atomic_subtract_acq_short       atomic_subtract_barr_short
#define atomic_subtract_rel_short       atomic_subtract_barr_short

#define atomic_set_acq_int              atomic_set_barr_int
#define atomic_set_rel_int              atomic_set_barr_int
#define atomic_clear_acq_int            atomic_clear_barr_int
#define atomic_clear_rel_int            atomic_clear_barr_int
#define atomic_add_acq_int              atomic_add_barr_int
#define atomic_add_rel_int              atomic_add_barr_int
#define atomic_subtract_acq_int         atomic_subtract_barr_int
#define atomic_subtract_rel_int         atomic_subtract_barr_int
#define atomic_cmpset_acq_int           atomic_cmpset_int
#define atomic_cmpset_rel_int           atomic_cmpset_int

#define atomic_set_acq_long             atomic_set_barr_long
#define atomic_set_rel_long             atomic_set_barr_long
#define atomic_clear_acq_long           atomic_clear_barr_long
#define atomic_clear_rel_long           atomic_clear_barr_long
#define atomic_add_acq_long             atomic_add_barr_long
#define atomic_add_rel_long             atomic_add_barr_long
#define atomic_subtract_acq_long        atomic_subtract_barr_long
#define atomic_subtract_rel_long        atomic_subtract_barr_long
#define atomic_cmpset_acq_long          atomic_cmpset_long
#define atomic_cmpset_rel_long          atomic_cmpset_long

#define atomic_readandclear_int(p)      atomic_swap_int(p, 0)
#define atomic_readandclear_long(p)     atomic_swap_long(p, 0)

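/*
 * Editor's example (illustrative, not part of the original header):
 * atomic_readandclear_int() is just a swap with zero, which is the usual
 * way to drain a word of pending event bits in one step.  Names are
 * hypothetical.
 */
#if 0
static volatile u_int example_pending;

static __inline void
example_drain_events(void (*handle)(u_int))
{
        u_int pending;

        pending = atomic_readandclear_int(&example_pending);
        if (pending != 0)
                handle(pending);
}
#endif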
/* Operations on 8-bit bytes. */
#define atomic_set_8            atomic_set_char
#define atomic_set_acq_8        atomic_set_acq_char
#define atomic_set_rel_8        atomic_set_rel_char
#define atomic_clear_8          atomic_clear_char
#define atomic_clear_acq_8      atomic_clear_acq_char
#define atomic_clear_rel_8      atomic_clear_rel_char
#define atomic_add_8            atomic_add_char
#define atomic_add_acq_8        atomic_add_acq_char
#define atomic_add_rel_8        atomic_add_rel_char
#define atomic_subtract_8       atomic_subtract_char
#define atomic_subtract_acq_8   atomic_subtract_acq_char
#define atomic_subtract_rel_8   atomic_subtract_rel_char
#define atomic_load_acq_8       atomic_load_acq_char
#define atomic_store_rel_8      atomic_store_rel_char

/* Operations on 16-bit words. */
#define atomic_set_16           atomic_set_short
#define atomic_set_acq_16       atomic_set_acq_short
#define atomic_set_rel_16       atomic_set_rel_short
#define atomic_clear_16         atomic_clear_short
#define atomic_clear_acq_16     atomic_clear_acq_short
#define atomic_clear_rel_16     atomic_clear_rel_short
#define atomic_add_16           atomic_add_short
#define atomic_add_acq_16       atomic_add_acq_short
#define atomic_add_rel_16       atomic_add_rel_short
#define atomic_subtract_16      atomic_subtract_short
#define atomic_subtract_acq_16  atomic_subtract_acq_short
#define atomic_subtract_rel_16  atomic_subtract_rel_short
#define atomic_load_acq_16      atomic_load_acq_short
#define atomic_store_rel_16     atomic_store_rel_short

/* Operations on 32-bit double words. */
#define atomic_set_32           atomic_set_int
#define atomic_set_acq_32       atomic_set_acq_int
#define atomic_set_rel_32       atomic_set_rel_int
#define atomic_clear_32         atomic_clear_int
#define atomic_clear_acq_32     atomic_clear_acq_int
#define atomic_clear_rel_32     atomic_clear_rel_int
#define atomic_add_32           atomic_add_int
#define atomic_add_acq_32       atomic_add_acq_int
#define atomic_add_rel_32       atomic_add_rel_int
#define atomic_subtract_32      atomic_subtract_int
#define atomic_subtract_acq_32  atomic_subtract_acq_int
#define atomic_subtract_rel_32  atomic_subtract_rel_int
#define atomic_load_acq_32      atomic_load_acq_int
#define atomic_store_rel_32     atomic_store_rel_int
#define atomic_cmpset_32        atomic_cmpset_int
#define atomic_cmpset_acq_32    atomic_cmpset_acq_int
#define atomic_cmpset_rel_32    atomic_cmpset_rel_int
#define atomic_swap_32          atomic_swap_int
#define atomic_readandclear_32  atomic_readandclear_int
#define atomic_fetchadd_32      atomic_fetchadd_int
#define atomic_testandset_32    atomic_testandset_int
#define atomic_testandclear_32  atomic_testandclear_int

/* Operations on pointers. */
#define atomic_set_ptr(p, v) \
        atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define atomic_set_acq_ptr(p, v) \
        atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_set_rel_ptr(p, v) \
        atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_ptr(p, v) \
        atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_acq_ptr(p, v) \
        atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_rel_ptr(p, v) \
        atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_ptr(p, v) \
        atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_acq_ptr(p, v) \
        atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_rel_ptr(p, v) \
        atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_ptr(p, v) \
        atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_acq_ptr(p, v) \
        atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_rel_ptr(p, v) \
        atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_load_acq_ptr(p) \
        atomic_load_acq_int((volatile u_int *)(p))
#define atomic_store_rel_ptr(p, v) \
        atomic_store_rel_int((volatile u_int *)(p), (v))
#define atomic_cmpset_ptr(dst, old, new) \
        atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define atomic_cmpset_acq_ptr(dst, old, new) \
        atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
            (u_int)(new))
#define atomic_cmpset_rel_ptr(dst, old, new) \
        atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
            (u_int)(new))
#define atomic_swap_ptr(p, v) \
        atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define atomic_readandclear_ptr(p) \
        atomic_readandclear_int((volatile u_int *)(p))

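/*
 * Editor's example (illustrative sketch, not part of the original header):
 * on i386 a pointer is 32 bits wide, so the pointer operations above are
 * thin wrappers around the "int" operations.  A lock-free singly-linked
 * push using atomic_cmpset_ptr() might look like this; the structure and
 * function names are hypothetical.
 */
#if 0
struct example_node {
        struct example_node *next;
};

static struct example_node * volatile example_head;

static __inline void
example_push(struct example_node *n)
{
        struct example_node *old;

        do {
                old = example_head;
                n->next = old;
        } while (atomic_cmpset_ptr(&example_head, old, n) == 0);
}
#endif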
#endif /* !WANT_FUNCTIONS */

#endif /* !_MACHINE_ATOMIC_H_ */
