FreeBSD/Linux Kernel Cross Reference
sys/i386/include/atomic.h


/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

#include <sys/atomic_common.h>

#ifdef _KERNEL
#include <machine/md_var.h>
#include <machine/specialreg.h>
#endif

#ifndef __OFFSETOF_MONITORBUF
/*
 * __OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in i386/vm_machdep.c ensures that the value is correct.
 */
#define __OFFSETOF_MONITORBUF   0x80

static __inline void
__mbk(void)
{

        __asm __volatile("lock; addl $0,%%fs:%0"
            : "+m" (*(u_int *)__OFFSETOF_MONITORBUF) : : "memory", "cc");
}

static __inline void
__mbu(void)
{

        __asm __volatile("lock; addl $0,(%%esp)" : : : "memory", "cc");
}
#endif

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)        (*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)      (*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)        (*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)   (*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)       (*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)     (*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)       (*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)  (*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)         (*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)       (*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)         (*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)    (*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)        (return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)   (return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)        (*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)      (*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)        (*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)   (*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)       (return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)  (return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
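
/*
 * Illustrative usage sketch (editor's example, not part of this header;
 * "sc_flags", "SC_RUNNING" and "sc_count" are hypothetical fields):
 *
 *      atomic_set_int(&sc->sc_flags, SC_RUNNING);
 *      atomic_add_int(&sc->sc_count, 1);
 *
 * Each call is a single atomic read-modify-write, so no lock is needed
 * even when other CPUs update the same words concurrently.
 */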

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules call real functions which are built into the kernel.
 * This allows kernel modules to be portable between UP and SMP systems.
 */
#if !defined(__GNUCLIKE_ASM)
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)                     \
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);  \
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int     atomic_cmpset_char(volatile u_char *dst, u_char expect, u_char src);
int     atomic_cmpset_short(volatile u_short *dst, u_short expect, u_short src);
int     atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int     atomic_fcmpset_char(volatile u_char *dst, u_char *expect, u_char src);
int     atomic_fcmpset_short(volatile u_short *dst, u_short *expect,
            u_short src);
int     atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
u_int   atomic_fetchadd_int(volatile u_int *p, u_int v);
int     atomic_testandset_int(volatile u_int *p, u_int v);
int     atomic_testandclear_int(volatile u_int *p, u_int v);
void    atomic_thread_fence_acq(void);
void    atomic_thread_fence_acq_rel(void);
void    atomic_thread_fence_rel(void);
void    atomic_thread_fence_seq_cst(void);

#define ATOMIC_LOAD(TYPE)                                       \
u_##TYPE        atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define ATOMIC_STORE(TYPE)                                      \
void            atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int             atomic_cmpset_64(volatile uint64_t *, uint64_t, uint64_t);
int             atomic_fcmpset_64(volatile uint64_t *, uint64_t *, uint64_t);
uint64_t        atomic_load_acq_64(volatile uint64_t *);
void            atomic_store_rel_64(volatile uint64_t *, uint64_t);
uint64_t        atomic_swap_64(volatile uint64_t *, uint64_t);
uint64_t        atomic_fetchadd_64(volatile uint64_t *, uint64_t);
void            atomic_add_64(volatile uint64_t *, uint64_t);
void            atomic_subtract_64(volatile uint64_t *, uint64_t);

#else /* __GNUCLIKE_ASM */

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL) || defined(KLD_MODULE)
#define MPLOCKED        "lock ; "
#else
#define MPLOCKED
#endif

/*
 * The assembly is volatilized so that the compiler cannot discard it
 * as dead code.  GCC aggressively reorders operations, so the "memory"
 * clobber is also required to keep accesses from being moved across
 * the memory barriers.
 */
#define ATOMIC_ASM(NAME, TYPE, OP, CONS, V)             \
static __inline void                                    \
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(MPLOCKED OP                    \
        : "+m" (*p)                                     \
        : CONS (V)                                      \
        : "cc");                                        \
}                                                       \
                                                        \
static __inline void                                    \
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{                                                       \
        __asm __volatile(MPLOCKED OP                    \
        : "+m" (*p)                                     \
        : CONS (V)                                      \
        : "memory", "cc");                              \
}                                                       \
struct __hack

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * cmpset:
 *      if (*dst == expect)
 *              *dst = src
 *
 * fcmpset:
 *      if (*dst == *expect)
 *              *dst = src
 *      else
 *              *expect = *dst
 *
 * Returns 0 on failure, non-zero on success.
 */
#define ATOMIC_CMPSET(TYPE, CONS)                       \
static __inline int                                     \
atomic_cmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE expect, u_##TYPE src) \
{                                                       \
        u_char res;                                     \
                                                        \
        __asm __volatile(                               \
        "       " MPLOCKED "            "               \
        "       cmpxchg %3,%1 ;         "               \
        "       sete    %0 ;            "               \
        "# atomic_cmpset_" #TYPE "      "               \
        : "=q" (res),                   /* 0 */         \
          "+m" (*dst),                  /* 1 */         \
          "+a" (expect)                 /* 2 */         \
        : CONS (src)                    /* 3 */         \
        : "memory", "cc");                              \
        return (res);                                   \
}                                                       \
                                                        \
static __inline int                                     \
atomic_fcmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE *expect, u_##TYPE src) \
{                                                       \
        u_char res;                                     \
                                                        \
        __asm __volatile(                               \
        "       " MPLOCKED "            "               \
        "       cmpxchg %3,%1 ;         "               \
        "       sete    %0 ;            "               \
        "# atomic_fcmpset_" #TYPE "     "               \
        : "=q" (res),                   /* 0 */         \
          "+m" (*dst),                  /* 1 */         \
          "+a" (*expect)                /* 2 */         \
        : CONS (src)                    /* 3 */         \
        : "memory", "cc");                              \
        return (res);                                   \
}

ATOMIC_CMPSET(char, "q");
ATOMIC_CMPSET(short, "r");
ATOMIC_CMPSET(int, "r");
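
/*
 * Illustrative sketch (editor's example, not part of this header): a
 * lock-free counter increment built on fcmpset.  On failure fcmpset
 * refreshes "v" from *p itself, so the loop never rereads the target:
 *
 *      u_int v;
 *
 *      v = *p;
 *      while (!atomic_fcmpset_int(p, &v, v + 1))
 *              ;
 */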

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

        __asm __volatile(
        "       " MPLOCKED "            "
        "       xaddl   %0,%1 ;         "
        "# atomic_fetchadd_int"
        : "+r" (v),                     /* 0 */
          "+m" (*p)                     /* 1 */
        : : "cc");
        return (v);
}
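
/*
 * Example (illustrative; "next_ticket" is a hypothetical shared
 * counter): fetchadd hands out unique, monotonically increasing
 * ticket numbers without a lock:
 *
 *      u_int my_ticket;
 *
 *      my_ticket = atomic_fetchadd_int(&next_ticket, 1);
 */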

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       btsl    %2,%1 ;         "
        "       setc    %0 ;            "
        "# atomic_testandset_int"
        : "=q" (res),                   /* 0 */
          "+m" (*p)                     /* 1 */
        : "Ir" (v & 0x1f)               /* 2 */
        : "cc");
        return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       btrl    %2,%1 ;         "
        "       setc    %0 ;            "
        "# atomic_testandclear_int"
        : "=q" (res),                   /* 0 */
          "+m" (*p)                     /* 1 */
        : "Ir" (v & 0x1f)               /* 2 */
        : "cc");
        return (res);
}
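
/*
 * Example (illustrative; "mask" is hypothetical): atomically claim
 * bit 5 of a shared word.  The return value reports the bit's prior
 * state, so exactly one concurrent caller sees 0 and wins:
 *
 *      if (atomic_testandset_int(&mask, 5) == 0) {
 *              ... the bit was clear; this caller now owns it ...
 *      }
 */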

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  In the kernel, we use a private per-cpu
 * cache line for "mem", to avoid introducing false data
 * dependencies.  In user space, we use the word at the top of the
 * stack.
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */
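
/*
 * Sketch of the Store/Load hazard the seq_cst fence closes
 * (illustrative; x, y, r1, r2 are hypothetical, all initially 0):
 *
 *      CPU 1                   CPU 2
 *      x = 1;                  y = 1;
 *      r1 = y;                 r2 = x;
 *
 * Without a Store/Load barrier between each store and the following
 * load, IA32 permits both r1 == 0 and r2 == 0 at the same time.
 */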

#if defined(_KERNEL)
#if defined(SMP) || defined(KLD_MODULE)
#define __storeload_barrier()   __mbk()
#else /* _KERNEL && UP */
#define __storeload_barrier()   __compiler_membar()
#endif /* SMP */
#else /* !_KERNEL */
#define __storeload_barrier()   __mbu()
#endif /* _KERNEL */

#define ATOMIC_LOAD(TYPE)                                       \
static __inline u_##TYPE                                        \
atomic_load_acq_##TYPE(volatile u_##TYPE *p)                    \
{                                                               \
        u_##TYPE res;                                           \
                                                                \
        res = *p;                                               \
        __compiler_membar();                                    \
        return (res);                                           \
}                                                               \
struct __hack

#define ATOMIC_STORE(TYPE)                                      \
static __inline void                                            \
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)       \
{                                                               \
                                                                \
        __compiler_membar();                                    \
        *p = v;                                                 \
}                                                               \
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

        __compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

        __compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

        __compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

        __storeload_barrier();
}

#ifdef _KERNEL

#ifdef WANT_FUNCTIONS
int             atomic_cmpset_64_i386(volatile uint64_t *, uint64_t, uint64_t);
int             atomic_cmpset_64_i586(volatile uint64_t *, uint64_t, uint64_t);
uint64_t        atomic_load_acq_64_i386(volatile uint64_t *);
uint64_t        atomic_load_acq_64_i586(volatile uint64_t *);
void            atomic_store_rel_64_i386(volatile uint64_t *, uint64_t);
void            atomic_store_rel_64_i586(volatile uint64_t *, uint64_t);
uint64_t        atomic_swap_64_i386(volatile uint64_t *, uint64_t);
uint64_t        atomic_swap_64_i586(volatile uint64_t *, uint64_t);
#endif

/* I486 does not support SMP or CMPXCHG8B. */
static __inline int
atomic_cmpset_64_i386(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
        volatile uint32_t *p;
        u_char res;

        p = (volatile uint32_t *)dst;
        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       xorl    %1,%%eax ;      "
        "       xorl    %2,%%edx ;      "
        "       orl     %%edx,%%eax ;   "
        "       jne     1f ;            "
        "       movl    %4,%1 ;         "
        "       movl    %5,%2 ;         "
        "1:                             "
        "       sete    %3 ;            "
        "       popfl"
        : "+A" (expect),                /* 0 */
          "+m" (*p),                    /* 1 */
          "+m" (*(p + 1)),              /* 2 */
          "=q" (res)                    /* 3 */
        : "r" ((uint32_t)src),          /* 4 */
          "r" ((uint32_t)(src >> 32))   /* 5 */
        : "memory", "cc");
        return (res);
}

static __inline int
atomic_fcmpset_64_i386(volatile uint64_t *dst, uint64_t *expect, uint64_t src)
{

        if (atomic_cmpset_64_i386(dst, *expect, src)) {
                return (1);
        } else {
                *expect = *dst;
                return (0);
        }
}

static __inline uint64_t
atomic_load_acq_64_i386(volatile uint64_t *p)
{
        volatile uint32_t *q;
        uint64_t res;

        q = (volatile uint32_t *)p;
        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       movl    %1,%%eax ;      "
        "       movl    %2,%%edx ;      "
        "       popfl"
        : "=&A" (res)                   /* 0 */
        : "m" (*q),                     /* 1 */
          "m" (*(q + 1))                /* 2 */
        : "memory");
        return (res);
}

static __inline void
atomic_store_rel_64_i386(volatile uint64_t *p, uint64_t v)
{
        volatile uint32_t *q;

        q = (volatile uint32_t *)p;
        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       movl    %%eax,%0 ;      "
        "       movl    %%edx,%1 ;      "
        "       popfl"
        : "=m" (*q),                    /* 0 */
          "=m" (*(q + 1))               /* 1 */
        : "A" (v)                       /* 2 */
        : "memory");
}

static __inline uint64_t
atomic_swap_64_i386(volatile uint64_t *p, uint64_t v)
{
        volatile uint32_t *q;
        uint64_t res;

        q = (volatile uint32_t *)p;
        __asm __volatile(
        "       pushfl ;                "
        "       cli ;                   "
        "       movl    %1,%%eax ;      "
        "       movl    %2,%%edx ;      "
        "       movl    %4,%2 ;         "
        "       movl    %3,%1 ;         "
        "       popfl"
        : "=&A" (res),                  /* 0 */
          "+m" (*q),                    /* 1 */
          "+m" (*(q + 1))               /* 2 */
        : "r" ((uint32_t)v),            /* 3 */
          "r" ((uint32_t)(v >> 32)));   /* 4 */
        return (res);
}

static __inline int
atomic_cmpset_64_i586(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       cmpxchg8b %1 ;          "
        "       sete    %0"
        : "=q" (res),                   /* 0 */
          "+m" (*dst),                  /* 1 */
          "+A" (expect)                 /* 2 */
        : "b" ((uint32_t)src),          /* 3 */
          "c" ((uint32_t)(src >> 32))   /* 4 */
        : "memory", "cc");
        return (res);
}

static __inline int
atomic_fcmpset_64_i586(volatile uint64_t *dst, uint64_t *expect, uint64_t src)
{
        u_char res;

        __asm __volatile(
        "       " MPLOCKED "            "
        "       cmpxchg8b %1 ;          "
        "       sete    %0"
        : "=q" (res),                   /* 0 */
          "+m" (*dst),                  /* 1 */
          "+A" (*expect)                /* 2 */
        : "b" ((uint32_t)src),          /* 3 */
          "c" ((uint32_t)(src >> 32))   /* 4 */
        : "memory", "cc");
        return (res);
}

static __inline uint64_t
atomic_load_acq_64_i586(volatile uint64_t *p)
{
        uint64_t res;

        __asm __volatile(
        "       movl    %%ebx,%%eax ;   "
        "       movl    %%ecx,%%edx ;   "
        "       " MPLOCKED "            "
        "       cmpxchg8b %1"
        : "=&A" (res),                  /* 0 */
          "+m" (*p)                     /* 1 */
        : : "memory", "cc");
        return (res);
}

static __inline void
atomic_store_rel_64_i586(volatile uint64_t *p, uint64_t v)
{

        __asm __volatile(
        "       movl    %%eax,%%ebx ;   "
        "       movl    %%edx,%%ecx ;   "
        "1:                             "
        "       " MPLOCKED "            "
        "       cmpxchg8b %0 ;          "
        "       jne     1b"
        : "+m" (*p),                    /* 0 */
          "+A" (v)                      /* 1 */
        : : "ebx", "ecx", "memory", "cc");
}

static __inline uint64_t
atomic_swap_64_i586(volatile uint64_t *p, uint64_t v)
{

        __asm __volatile(
        "       movl    %%eax,%%ebx ;   "
        "       movl    %%edx,%%ecx ;   "
        "1:                             "
        "       " MPLOCKED "            "
        "       cmpxchg8b %0 ;          "
        "       jne     1b"
        : "+m" (*p),                    /* 0 */
          "+A" (v)                      /* 1 */
        : : "ebx", "ecx", "memory", "cc");
        return (v);
}

static __inline int
atomic_cmpset_64(volatile uint64_t *dst, uint64_t expect, uint64_t src)
{

        if ((cpu_feature & CPUID_CX8) == 0)
                return (atomic_cmpset_64_i386(dst, expect, src));
        else
                return (atomic_cmpset_64_i586(dst, expect, src));
}

static __inline int
atomic_fcmpset_64(volatile uint64_t *dst, uint64_t *expect, uint64_t src)
{

        if ((cpu_feature & CPUID_CX8) == 0)
                return (atomic_fcmpset_64_i386(dst, expect, src));
        else
                return (atomic_fcmpset_64_i586(dst, expect, src));
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{

        if ((cpu_feature & CPUID_CX8) == 0)
                return (atomic_load_acq_64_i386(p));
        else
                return (atomic_load_acq_64_i586(p));
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t v)
{

        if ((cpu_feature & CPUID_CX8) == 0)
                atomic_store_rel_64_i386(p, v);
        else
                atomic_store_rel_64_i586(p, v);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t v)
{

        if ((cpu_feature & CPUID_CX8) == 0)
                return (atomic_swap_64_i386(p, v));
        else
                return (atomic_swap_64_i586(p, v));
}

static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t v)
{

        for (;;) {
                uint64_t t = *p;
                if (atomic_cmpset_64(p, t, t + v))
                        return (t);
        }
}

static __inline void
atomic_add_64(volatile uint64_t *p, uint64_t v)
{
        uint64_t t;

        for (;;) {
                t = *p;
                if (atomic_cmpset_64(p, t, t + v))
                        break;
        }
}

static __inline void
atomic_subtract_64(volatile uint64_t *p, uint64_t v)
{
        uint64_t t;

        for (;;) {
                t = *p;
                if (atomic_cmpset_64(p, t, t - v))
                        break;
        }
}

#endif /* _KERNEL */

#endif /* !__GNUCLIKE_ASM */

ATOMIC_ASM(set,      char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,      char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,      short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,      short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,      int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,      int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,      long,  "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    long,  "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,      long,  "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);

#define ATOMIC_LOADSTORE(TYPE)                          \
        ATOMIC_LOAD(TYPE);                              \
        ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE

#ifndef WANT_FUNCTIONS

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{

        return (atomic_cmpset_int((volatile u_int *)dst, (u_int)expect,
            (u_int)src));
}

static __inline int
atomic_fcmpset_long(volatile u_long *dst, u_long *expect, u_long src)
{

        return (atomic_fcmpset_int((volatile u_int *)dst, (u_int *)expect,
            (u_int)src));
}

static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

        return (atomic_fetchadd_int((volatile u_int *)p, (u_int)v));
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{

        return (atomic_testandset_int((volatile u_int *)p, v));
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{

        return (atomic_testandclear_int((volatile u_int *)p, v));
}

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

        __asm __volatile(
        "       xchgl   %1,%0 ;         "
        "# atomic_swap_int"
        : "+r" (v),                     /* 0 */
          "+m" (*p));                   /* 1 */
        return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

        return (atomic_swap_int((volatile u_int *)p, (u_int)v));
}

#else /* !__GNUCLIKE_ASM */

u_int   atomic_swap_int(volatile u_int *p, u_int v);
u_long  atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define atomic_set_acq_char             atomic_set_barr_char
#define atomic_set_rel_char             atomic_set_barr_char
#define atomic_clear_acq_char           atomic_clear_barr_char
#define atomic_clear_rel_char           atomic_clear_barr_char
#define atomic_add_acq_char             atomic_add_barr_char
#define atomic_add_rel_char             atomic_add_barr_char
#define atomic_subtract_acq_char        atomic_subtract_barr_char
#define atomic_subtract_rel_char        atomic_subtract_barr_char
#define atomic_cmpset_acq_char          atomic_cmpset_char
#define atomic_cmpset_rel_char          atomic_cmpset_char
#define atomic_fcmpset_acq_char         atomic_fcmpset_char
#define atomic_fcmpset_rel_char         atomic_fcmpset_char

#define atomic_set_acq_short            atomic_set_barr_short
#define atomic_set_rel_short            atomic_set_barr_short
#define atomic_clear_acq_short          atomic_clear_barr_short
#define atomic_clear_rel_short          atomic_clear_barr_short
#define atomic_add_acq_short            atomic_add_barr_short
#define atomic_add_rel_short            atomic_add_barr_short
#define atomic_subtract_acq_short       atomic_subtract_barr_short
#define atomic_subtract_rel_short       atomic_subtract_barr_short
#define atomic_cmpset_acq_short         atomic_cmpset_short
#define atomic_cmpset_rel_short         atomic_cmpset_short
#define atomic_fcmpset_acq_short        atomic_fcmpset_short
#define atomic_fcmpset_rel_short        atomic_fcmpset_short

#define atomic_set_acq_int              atomic_set_barr_int
#define atomic_set_rel_int              atomic_set_barr_int
#define atomic_clear_acq_int            atomic_clear_barr_int
#define atomic_clear_rel_int            atomic_clear_barr_int
#define atomic_add_acq_int              atomic_add_barr_int
#define atomic_add_rel_int              atomic_add_barr_int
#define atomic_subtract_acq_int         atomic_subtract_barr_int
#define atomic_subtract_rel_int         atomic_subtract_barr_int
#define atomic_cmpset_acq_int           atomic_cmpset_int
#define atomic_cmpset_rel_int           atomic_cmpset_int
#define atomic_fcmpset_acq_int          atomic_fcmpset_int
#define atomic_fcmpset_rel_int          atomic_fcmpset_int

#define atomic_set_acq_long             atomic_set_barr_long
#define atomic_set_rel_long             atomic_set_barr_long
#define atomic_clear_acq_long           atomic_clear_barr_long
#define atomic_clear_rel_long           atomic_clear_barr_long
#define atomic_add_acq_long             atomic_add_barr_long
#define atomic_add_rel_long             atomic_add_barr_long
#define atomic_subtract_acq_long        atomic_subtract_barr_long
#define atomic_subtract_rel_long        atomic_subtract_barr_long
#define atomic_cmpset_acq_long          atomic_cmpset_long
#define atomic_cmpset_rel_long          atomic_cmpset_long
#define atomic_fcmpset_acq_long         atomic_fcmpset_long
#define atomic_fcmpset_rel_long         atomic_fcmpset_long

#define atomic_readandclear_int(p)      atomic_swap_int(p, 0)
#define atomic_readandclear_long(p)     atomic_swap_long(p, 0)
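
/*
 * Example (illustrative; "pending_events" is hypothetical): drain a
 * word of pending event bits in a single atomic step, so no update
 * is lost between the read and the clear:
 *
 *      u_int pending;
 *
 *      pending = atomic_readandclear_int(&pending_events);
 *      ... handle every bit set in "pending" ...
 */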

/* Operations on 8-bit bytes. */
#define atomic_set_8            atomic_set_char
#define atomic_set_acq_8        atomic_set_acq_char
#define atomic_set_rel_8        atomic_set_rel_char
#define atomic_clear_8          atomic_clear_char
#define atomic_clear_acq_8      atomic_clear_acq_char
#define atomic_clear_rel_8      atomic_clear_rel_char
#define atomic_add_8            atomic_add_char
#define atomic_add_acq_8        atomic_add_acq_char
#define atomic_add_rel_8        atomic_add_rel_char
#define atomic_subtract_8       atomic_subtract_char
#define atomic_subtract_acq_8   atomic_subtract_acq_char
#define atomic_subtract_rel_8   atomic_subtract_rel_char
#define atomic_load_acq_8       atomic_load_acq_char
#define atomic_store_rel_8      atomic_store_rel_char
#define atomic_cmpset_8         atomic_cmpset_char
#define atomic_cmpset_acq_8     atomic_cmpset_acq_char
#define atomic_cmpset_rel_8     atomic_cmpset_rel_char
#define atomic_fcmpset_8        atomic_fcmpset_char
#define atomic_fcmpset_acq_8    atomic_fcmpset_acq_char
#define atomic_fcmpset_rel_8    atomic_fcmpset_rel_char

/* Operations on 16-bit words. */
#define atomic_set_16           atomic_set_short
#define atomic_set_acq_16       atomic_set_acq_short
#define atomic_set_rel_16       atomic_set_rel_short
#define atomic_clear_16         atomic_clear_short
#define atomic_clear_acq_16     atomic_clear_acq_short
#define atomic_clear_rel_16     atomic_clear_rel_short
#define atomic_add_16           atomic_add_short
#define atomic_add_acq_16       atomic_add_acq_short
#define atomic_add_rel_16       atomic_add_rel_short
#define atomic_subtract_16      atomic_subtract_short
#define atomic_subtract_acq_16  atomic_subtract_acq_short
#define atomic_subtract_rel_16  atomic_subtract_rel_short
#define atomic_load_acq_16      atomic_load_acq_short
#define atomic_store_rel_16     atomic_store_rel_short
#define atomic_cmpset_16        atomic_cmpset_short
#define atomic_cmpset_acq_16    atomic_cmpset_acq_short
#define atomic_cmpset_rel_16    atomic_cmpset_rel_short
#define atomic_fcmpset_16       atomic_fcmpset_short
#define atomic_fcmpset_acq_16   atomic_fcmpset_acq_short
#define atomic_fcmpset_rel_16   atomic_fcmpset_rel_short

/* Operations on 32-bit double words. */
#define atomic_set_32           atomic_set_int
#define atomic_set_acq_32       atomic_set_acq_int
#define atomic_set_rel_32       atomic_set_rel_int
#define atomic_clear_32         atomic_clear_int
#define atomic_clear_acq_32     atomic_clear_acq_int
#define atomic_clear_rel_32     atomic_clear_rel_int
#define atomic_add_32           atomic_add_int
#define atomic_add_acq_32       atomic_add_acq_int
#define atomic_add_rel_32       atomic_add_rel_int
#define atomic_subtract_32      atomic_subtract_int
#define atomic_subtract_acq_32  atomic_subtract_acq_int
#define atomic_subtract_rel_32  atomic_subtract_rel_int
#define atomic_load_acq_32      atomic_load_acq_int
#define atomic_store_rel_32     atomic_store_rel_int
#define atomic_cmpset_32        atomic_cmpset_int
#define atomic_cmpset_acq_32    atomic_cmpset_acq_int
#define atomic_cmpset_rel_32    atomic_cmpset_rel_int
#define atomic_fcmpset_32       atomic_fcmpset_int
#define atomic_fcmpset_acq_32   atomic_fcmpset_acq_int
#define atomic_fcmpset_rel_32   atomic_fcmpset_rel_int
#define atomic_swap_32          atomic_swap_int
#define atomic_readandclear_32  atomic_readandclear_int
#define atomic_fetchadd_32      atomic_fetchadd_int
#define atomic_testandset_32    atomic_testandset_int
#define atomic_testandclear_32  atomic_testandclear_int

#ifdef _KERNEL
/* Operations on 64-bit quad words. */
#define atomic_cmpset_acq_64 atomic_cmpset_64
#define atomic_cmpset_rel_64 atomic_cmpset_64
#define atomic_fcmpset_acq_64 atomic_fcmpset_64
#define atomic_fcmpset_rel_64 atomic_fcmpset_64
#define atomic_fetchadd_acq_64  atomic_fetchadd_64
#define atomic_fetchadd_rel_64  atomic_fetchadd_64
#define atomic_add_acq_64 atomic_add_64
#define atomic_add_rel_64 atomic_add_64
#define atomic_subtract_acq_64 atomic_subtract_64
#define atomic_subtract_rel_64 atomic_subtract_64
#define atomic_load_64 atomic_load_acq_64
#define atomic_store_64 atomic_store_rel_64
#endif

/* Operations on pointers. */
#define atomic_set_ptr(p, v) \
        atomic_set_int((volatile u_int *)(p), (u_int)(v))
#define atomic_set_acq_ptr(p, v) \
        atomic_set_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_set_rel_ptr(p, v) \
        atomic_set_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_ptr(p, v) \
        atomic_clear_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_acq_ptr(p, v) \
        atomic_clear_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_clear_rel_ptr(p, v) \
        atomic_clear_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_ptr(p, v) \
        atomic_add_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_acq_ptr(p, v) \
        atomic_add_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_add_rel_ptr(p, v) \
        atomic_add_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_ptr(p, v) \
        atomic_subtract_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_acq_ptr(p, v) \
        atomic_subtract_acq_int((volatile u_int *)(p), (u_int)(v))
#define atomic_subtract_rel_ptr(p, v) \
        atomic_subtract_rel_int((volatile u_int *)(p), (u_int)(v))
#define atomic_load_acq_ptr(p) \
        atomic_load_acq_int((volatile u_int *)(p))
#define atomic_store_rel_ptr(p, v) \
        atomic_store_rel_int((volatile u_int *)(p), (v))
#define atomic_cmpset_ptr(dst, old, new) \
        atomic_cmpset_int((volatile u_int *)(dst), (u_int)(old), (u_int)(new))
#define atomic_cmpset_acq_ptr(dst, old, new) \
        atomic_cmpset_acq_int((volatile u_int *)(dst), (u_int)(old), \
            (u_int)(new))
#define atomic_cmpset_rel_ptr(dst, old, new) \
        atomic_cmpset_rel_int((volatile u_int *)(dst), (u_int)(old), \
            (u_int)(new))
#define atomic_fcmpset_ptr(dst, old, new) \
        atomic_fcmpset_int((volatile u_int *)(dst), (u_int *)(old), (u_int)(new))
#define atomic_fcmpset_acq_ptr(dst, old, new) \
        atomic_fcmpset_acq_int((volatile u_int *)(dst), (u_int *)(old), \
            (u_int)(new))
#define atomic_fcmpset_rel_ptr(dst, old, new) \
        atomic_fcmpset_rel_int((volatile u_int *)(dst), (u_int *)(old), \
            (u_int)(new))
#define atomic_swap_ptr(p, v) \
        atomic_swap_int((volatile u_int *)(p), (u_int)(v))
#define atomic_readandclear_ptr(p) \
        atomic_readandclear_int((volatile u_int *)(p))
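
/*
 * Example (illustrative; "head" and "n" are hypothetical struct node
 * pointers): a lock-free LIFO push built on the pointer cmpset
 * wrapper, retrying until no other CPU has changed the head:
 *
 *      struct node *h;
 *
 *      do {
 *              h = head;
 *              n->next = h;
 *      } while (atomic_cmpset_ptr(&head, h, n) == 0);
 */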

#endif /* !WANT_FUNCTIONS */

#if defined(_KERNEL)
#define mb()    __mbk()
#define wmb()   __mbk()
#define rmb()   __mbk()
#else
#define mb()    __mbu()
#define wmb()   __mbu()
#define rmb()   __mbu()
#endif

#endif /* !_MACHINE_ATOMIC_H_ */
