1 #ifndef _ALPHA_RWSEM_H
2 #define _ALPHA_RWSEM_H
3
4 /*
5 * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
6 * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
7 */
8
9 #ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
11 #endif
12
13 #ifdef __KERNEL__
14
15 #include <linux/compiler.h>
16 #include <linux/list.h>
17 #include <linux/spinlock.h>
18
19 struct rwsem_waiter;
20
21 extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
22 extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
23 extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
24
/*
 * The semaphore definition.
 *
 * "count" is a single 64-bit word packing two fields: the low 32 bits
 * count the active lockers (each reader adds ACTIVE_BIAS; a writer
 * adds ACTIVE_WRITE_BIAS = WAITING_BIAS + ACTIVE_BIAS), and
 * WAITING_BIAS pulls the whole value negative while tasks are queued.
 * Zero therefore means unlocked and uncontended (see the oldcount < 0
 * tests in the lock/unlock fast paths below).
 */
struct rw_semaphore {
	long count;			/* see bias constants below */
#define RWSEM_UNLOCKED_VALUE		0x0000000000000000L
#define RWSEM_ACTIVE_BIAS		0x0000000000000001L
#define RWSEM_ACTIVE_MASK		0x00000000ffffffffL
#define RWSEM_WAITING_BIAS		(-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t wait_lock;		/* serializes access to wait_list */
	struct list_head wait_list;	/* queue of blocked tasks */
#if RWSEM_DEBUG
	int debug;
#endif
};
42
/*
 * Optional trailing initializer for the debug member: expands to
 * ", 0" only when RWSEM_DEBUG is on, so __RWSEM_INITIALIZER below
 * matches the struct layout in either configuration.
 */
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT      , 0
#else
#define __RWSEM_DEBUG_INIT	/* */
#endif

/* Static initializer for an unlocked, uncontended rw_semaphore. */
#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT }

/* Define and statically initialize an unlocked rw_semaphore. */
#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
55
56 static inline void init_rwsem(struct rw_semaphore *sem)
57 {
58 sem->count = RWSEM_UNLOCKED_VALUE;
59 spin_lock_init(&sem->wait_lock);
60 INIT_LIST_HEAD(&sem->wait_list);
61 #if RWSEM_DEBUG
62 sem->debug = 0;
63 #endif
64 }
65
/*
 * Lock for reading (fast path): atomically add RWSEM_ACTIVE_READ_BIAS
 * to the count.  If the count was negative beforehand (a writer is
 * active or waiters are queued), fall into the slow path to sleep.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef CONFIG_SMP
	/* UP: no cross-CPU races, a plain read-modify-write suffices */
	oldcount = sem->count;
	sem->count += RWSEM_ACTIVE_READ_BIAS;
#else
	long temp;
	/*
	 * SMP: load-locked/store-conditional loop.  A failed stq_c
	 * leaves 0 in %2 and branches to the out-of-line label 2,
	 * which retries from 1.  The trailing mb keeps later accesses
	 * to the protected data from being reordered ahead of the
	 * acquisition (acquire-style barrier).
	 */
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"
	" addq %0,%3,%2\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	" mb\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
	/* negative old count => contended: take the slow path */
	if (__builtin_expect(oldcount < 0, 0))
		rwsem_down_read_failed(sem);
}
89
90 /*
91 * trylock for reading -- returns 1 if successful, 0 if contention
92 */
93 static inline int __down_read_trylock(struct rw_semaphore *sem)
94 {
95 long old, new, res;
96
97 res = sem->count;
98 do {
99 new = res + RWSEM_ACTIVE_READ_BIAS;
100 if (new <= 0)
101 break;
102 old = res;
103 res = cmpxchg(&sem->count, old, new);
104 } while (res != old);
105 return res >= 0 ? 1 : 0;
106 }
107
/*
 * Lock for writing (fast path): atomically add RWSEM_ACTIVE_WRITE_BIAS
 * to the count.  A writer may only take the lock when it was completely
 * idle, so any non-zero old count sends us to the slow path.
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef CONFIG_SMP
	/* UP: no cross-CPU races, a plain read-modify-write suffices */
	oldcount = sem->count;
	sem->count += RWSEM_ACTIVE_WRITE_BIAS;
#else
	long temp;
	/*
	 * SMP: same LL/SC retry loop as __down_read, with the trailing
	 * mb providing acquire-style ordering; only the bias differs.
	 */
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"
	" addq %0,%3,%2\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	" mb\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
	/* any non-zero old count => readers/writer/waiters present */
	if (__builtin_expect(oldcount, 0))
		rwsem_down_write_failed(sem);
}
131
132 /*
133 * trylock for writing -- returns 1 if successful, 0 if contention
134 */
135 static inline int __down_write_trylock(struct rw_semaphore *sem)
136 {
137 long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
138 RWSEM_ACTIVE_WRITE_BIAS);
139 if (ret == RWSEM_UNLOCKED_VALUE)
140 return 1;
141 return 0;
142 }
143
/*
 * Unlock after reading: atomically subtract RWSEM_ACTIVE_READ_BIAS.
 * If the count was negative (waiters queued) and this was the last
 * active locker (low 32 bits of the old count equalled the read
 * bias), wake a queued waiter.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef CONFIG_SMP
	oldcount = sem->count;
	sem->count -= RWSEM_ACTIVE_READ_BIAS;
#else
	long temp;
	/*
	 * SMP: LL/SC loop as in __down_read, but here the mb comes
	 * FIRST so that all accesses made while holding the lock are
	 * ordered before the count drops (release-style barrier).
	 */
	__asm__ __volatile__(
	" mb\n"
	"1: ldq_l %0,%1\n"
	" subq %0,%3,%2\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
	if (__builtin_expect(oldcount < 0, 0))
		/* (int) truncation isolates the active-locker field */
		if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
			rwsem_wake(sem);
}
168
/*
 * Unlock after writing: atomically subtract RWSEM_ACTIVE_WRITE_BIAS
 * and examine the NEW count.  If it is non-zero while its low 32
 * bits (the active-locker field) are zero, only waiters remain, so
 * wake one up.
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	long count;
#ifndef CONFIG_SMP
	sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
	count = sem->count;
#else
	long temp;
	/*
	 * SMP: LL/SC loop with a leading mb for release ordering.
	 * The subq after the beq runs only once the stq_c has
	 * succeeded; it recomputes old - bias into %0 so that
	 * "count" holds the NEW count for the waiter check below
	 * (stq_c consumed the copy in %2).
	 */
	__asm__ __volatile__(
	" mb\n"
	"1: ldq_l %0,%1\n"
	" subq %0,%3,%2\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	" subq %0,%3,%0\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (count), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
	if (__builtin_expect(count, 0))
		/* new count != 0 but no active lockers => waiters only */
		if ((int)count == 0)
			rwsem_wake(sem);
}
194
/*
 * Atomically add "val" to the count, discarding the result.  Note
 * that unlike the lock/unlock paths above, no mb is issued here.
 * NOTE(review): presumably callers provide their own ordering (e.g.
 * run under wait_lock in the generic slow path) -- confirm against
 * lib/rwsem.c.
 */
static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
{
#ifndef CONFIG_SMP
	sem->count += val;
#else
	long temp;
	/* SMP: plain LL/SC retry loop, no barrier */
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"
	" addq %0,%2,%0\n"
	" stq_c %0,%1\n"
	" beq %0,2f\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (sem->count)
	:"Ir" (val), "m" (sem->count));
#endif
}
213
/*
 * Atomically add "val" to the count and return the NEW count value.
 * Like rwsem_atomic_add, this issues no barrier; see the review note
 * there about caller-provided ordering.
 */
static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
{
#ifndef CONFIG_SMP
	sem->count += val;
	return sem->count;
#else
	long ret, temp;
	/*
	 * SMP: LL/SC loop.  The sum is computed twice: into %2 for
	 * the stq_c (which consumes its register) and into %0 so the
	 * new value survives as the return value.
	 */
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"
	" addq %0,%3,%2\n"
	" addq %0,%3,%0\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (ret), "=m" (sem->count), "=&r" (temp)
	:"Ir" (val), "m" (sem->count));

	return ret;
#endif
}
236
237 #endif /* __KERNEL__ */
238 #endif /* _ALPHA_RWSEM_H */