The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/i386/include/xen/synch_bitops.h

Version: -  FREEBSD  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-2  -  FREEBSD-11-1  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-4  -  FREEBSD-10-3  -  FREEBSD-10-2  -  FREEBSD-10-1  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-3  -  FREEBSD-9-2  -  FREEBSD-9-1  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-4  -  FREEBSD-8-3  -  FREEBSD-8-2  -  FREEBSD-8-1  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-4  -  FREEBSD-7-3  -  FREEBSD-7-2  -  FREEBSD-7-1  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-4  -  FREEBSD-6-3  -  FREEBSD-6-2  -  FREEBSD-6-1  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-5  -  FREEBSD-5-4  -  FREEBSD-5-3  -  FREEBSD-5-2  -  FREEBSD-5-1  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  linux-2.6  -  linux-2.4.22  -  MK83  -  MK84  -  PLAN9  -  DFBSD  -  NETBSD  -  NETBSD5  -  NETBSD4  -  NETBSD3  -  NETBSD20  -  OPENBSD  -  xnu-517  -  xnu-792  -  xnu-792.6.70  -  xnu-1228  -  xnu-1456.1.26  -  xnu-1699.24.8  -  xnu-2050.18.24  -  OPENSOLARIS  -  minix-3-1-1 
SearchContext: -  none  -  3  -  10 

    1 #ifndef __XEN_SYNCH_BITOPS_H__
    2 #define __XEN_SYNCH_BITOPS_H__
    3 
    4 /*
    5  * Copyright 1992, Linus Torvalds.
    6  * Heavily modified to provide guaranteed strong synchronisation
    7  * when communicating with Xen or other guest OSes running on other CPUs.
    8  */
    9 
   10 
/*
 * Reinterpret the untyped 'addr' parameter of the inline functions below
 * as a volatile long, so it can be handed to inline asm as a memory
 * operand.  Relies on an in-scope identifier literally named 'addr'.
 */
#define ADDR (*(volatile long *) addr)
   12 
   13 static __inline__ void synch_set_bit(int nr, volatile void * addr)
   14 {
   15     __asm__ __volatile__ ( 
   16         "lock btsl %1,%0"
   17         : "=m" (ADDR) : "Ir" (nr) : "memory" );
   18 }
   19 
   20 static __inline__ void synch_clear_bit(int nr, volatile void * addr)
   21 {
   22     __asm__ __volatile__ (
   23         "lock btrl %1,%0"
   24         : "=m" (ADDR) : "Ir" (nr) : "memory" );
   25 }
   26 
   27 static __inline__ void synch_change_bit(int nr, volatile void * addr)
   28 {
   29     __asm__ __volatile__ (
   30         "lock btcl %1,%0"
   31         : "=m" (ADDR) : "Ir" (nr) : "memory" );
   32 }
   33 
   34 static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
   35 {
   36     int oldbit;
   37     __asm__ __volatile__ (
   38         "lock btsl %2,%1\n\tsbbl %0,%0"
   39         : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
   40     return oldbit;
   41 }
   42 
   43 static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
   44 {
   45     int oldbit;
   46     __asm__ __volatile__ (
   47         "lock btrl %2,%1\n\tsbbl %0,%0"
   48         : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
   49     return oldbit;
   50 }
   51 
   52 static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
   53 {
   54     int oldbit;
   55 
   56     __asm__ __volatile__ (
   57         "lock btcl %2,%1\n\tsbbl %0,%0"
   58         : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
   59     return oldbit;
   60 }
   61 
/*
 * Dummy type whose size (100 longs) exceeds any real cmpxchg target.
 * Casting a pointer through __synch_xg() and dereferencing it as an "m"
 * asm operand tells GCC that the asm may touch the whole object, so the
 * compiler will not cache any part of it across the asm statement.
 */
struct __synch_xchg_dummy { unsigned long a[100]; };
#define __synch_xg(x) ((volatile struct __synch_xchg_dummy *)(x))

/*
 * Type-generic compare-and-exchange front end: selects the operand width
 * from sizeof(*(ptr)) and casts __synch_cmpxchg()'s result back to the
 * pointee type.  Returns the value previously stored at *ptr.
 */
#define synch_cmpxchg(ptr, old, new) \
((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
                                     (unsigned long)(old), \
                                     (unsigned long)(new), \
                                     sizeof(*(ptr))))
   70 
   71 static inline unsigned long __synch_cmpxchg(volatile void *ptr,
   72                                             unsigned long old,
   73                                             unsigned long new, int size)
   74 {
   75         unsigned long prev;
   76         switch (size) {
   77         case 1:
   78                 __asm__ __volatile__("lock; cmpxchgb %b1,%2"
   79                                      : "=a"(prev)
   80                                      : "q"(new), "m"(*__synch_xg(ptr)),
   81                                        ""(old)
   82                                      : "memory");
   83                 return prev;
   84         case 2:
   85                 __asm__ __volatile__("lock; cmpxchgw %w1,%2"
   86                                      : "=a"(prev)
   87                                      : "q"(new), "m"(*__synch_xg(ptr)),
   88                                        ""(old)
   89                                      : "memory");
   90                 return prev;
   91 #ifdef CONFIG_X86_64
   92         case 4:
   93                 __asm__ __volatile__("lock; cmpxchgl %k1,%2"
   94                                      : "=a"(prev)
   95                                      : "q"(new), "m"(*__synch_xg(ptr)),
   96                                        ""(old)
   97                                      : "memory");
   98                 return prev;
   99         case 8:
  100                 __asm__ __volatile__("lock; cmpxchgq %1,%2"
  101                                      : "=a"(prev)
  102                                      : "q"(new), "m"(*__synch_xg(ptr)),
  103                                        ""(old)
  104                                      : "memory");
  105                 return prev;
  106 #else
  107         case 4:
  108                 __asm__ __volatile__("lock; cmpxchgl %1,%2"
  109                                      : "=a"(prev)
  110                                      : "q"(new), "m"(*__synch_xg(ptr)),
  111                                        ""(old)
  112                                      : "memory");
  113                 return prev;
  114 #endif
  115         }
  116         return old;
  117 }
  118 
  119 static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
  120 {
  121     return ((1UL << (nr & 31)) & 
  122             (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
  123 }
  124 
  125 static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
  126 {
  127     int oldbit;
  128     __asm__ __volatile__ (
  129         "btl %2,%1\n\tsbbl %0,%0"
  130         : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
  131     return oldbit;
  132 }
  133 
/*
 * synch_test_bit(nr, addr): dispatch on whether 'nr' is known at compile
 * time — the C variant lets the compiler fold the word/mask arithmetic;
 * the asm variant handles run-time indices with a single btl.
 */
#define synch_test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 synch_const_test_bit((nr),(addr)) : \
 synch_var_test_bit((nr),(addr)))
  138 
  139 #endif /* __XEN_SYNCH_BITOPS_H__ */

Cache object: 933b64f4f1b5273b3ddd4f08ce7f661d


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.