#ifndef __XEN_SYNCH_BITOPS_H__
#define __XEN_SYNCH_BITOPS_H__

/*
 * Copyright 1992, Linus Torvalds.
 * Heavily modified to provide guaranteed strong synchronisation
 * when communicating with Xen or other guest OSes running on other CPUs.
 */

#define ADDR (*(volatile long *) addr)

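/*
 * Atomically set, clear, or toggle bit 'nr' of the bitmap at 'addr'.
 * The lock prefix makes the read-modify-write atomic with respect to
 * other CPUs and to the hypervisor.
 */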
static __inline__ void synch_set_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btsl %1,%0"
        : "+m" (ADDR) : "Ir" (nr) : "memory" );
}

static __inline__ void synch_clear_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btrl %1,%0"
        : "+m" (ADDR) : "Ir" (nr) : "memory" );
}

static __inline__ void synch_change_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btcl %1,%0"
        : "+m" (ADDR) : "Ir" (nr) : "memory" );
}

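/*
 * Atomic test-and-modify operations.  Each returns nonzero if the bit was
 * previously set: sbb subtracts the carry flag from the register itself,
 * leaving 0 or -1 in 'oldbit'.
 */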
static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btsl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btrl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__ (
        "lock btcl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

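/*
 * Atomic compare-and-exchange on a 1-, 2-, 4- or (on x86-64) 8-byte value.
 * Casting the target through a large dummy struct makes the "m" operand
 * cover more than a single long, so the compiler does not assume a fixed
 * small access size.
 */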
struct __synch_xchg_dummy { unsigned long a[100]; };
#define __synch_xg(x) ((volatile struct __synch_xchg_dummy *)(x))

#define synch_cmpxchg(ptr, old, new) \
    ((__typeof__(*(ptr)))__synch_cmpxchg((ptr), \
                                         (unsigned long)(old), \
                                         (unsigned long)(new), \
                                         sizeof(*(ptr))))

static inline unsigned long __synch_cmpxchg(volatile void *ptr,
                                            unsigned long old,
                                            unsigned long new, int size)
{
    unsigned long prev;
    switch (size) {
    case 1:
        __asm__ __volatile__("lock; cmpxchgb %b1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
    case 2:
        __asm__ __volatile__("lock; cmpxchgw %w1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
#ifdef CONFIG_X86_64
    case 4:
        __asm__ __volatile__("lock; cmpxchgl %k1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
    case 8:
        __asm__ __volatile__("lock; cmpxchgq %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
#else
    case 4:
        __asm__ __volatile__("lock; cmpxchgl %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
#endif
    }
    return old;
}

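/*
 * Test a bit without modifying it.  A plain read needs no lock prefix;
 * the constant-nr variant lets the compiler fold the test at build time,
 * while the variable-nr variant uses btl.
 */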
static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) &
            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "btl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
    return oldbit;
}

#define synch_test_bit(nr,addr) \
    (__builtin_constant_p(nr) ? \
     synch_const_test_bit((nr),(addr)) : \
     synch_var_test_bit((nr),(addr)))

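/*
 * Example (illustrative only; the names shared_flags and SLOT_BUSY are
 * hypothetical): atomically claim a flag bit in a page shared with the
 * hypervisor or another guest, and back off if it was already taken.
 *
 *     if ( !synch_test_and_set_bit(SLOT_BUSY, shared_flags) )
 *     {
 *         ... this CPU now owns the slot ...
 *         synch_clear_bit(SLOT_BUSY, shared_flags);
 *     }
 */
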
#endif /* __XEN_SYNCH_BITOPS_H__ */