/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 *
 * Trylock by Brian Watson (Brian.J.Watson@compaq.com).
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/module.h>

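/*
 * Lock state, in outline (as implemented below):
 * - sem->activity > 0:   that many readers currently hold the lock
 * - sem->activity == -1: a single writer holds the lock
 * - sem->activity == 0:  the lock is not held
 * - sem->wait_lock protects both 'activity' and 'wait_list'
 */
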
struct rwsem_waiter {
	struct list_head	list;
	struct task_struct	*task;
	unsigned int		flags;
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};
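
/*
 * Each sleeper queues a struct rwsem_waiter on its own kernel stack.  The
 * waker grants the lock by clearing waiter->flags before waking the task,
 * so a woken sleeper knows it holds the lock once it observes flags == 0
 * (see the wait loops in __down_read() and __down_write() below).
 */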

#if RWSEM_DEBUG
void rwsemtrace(struct rw_semaphore *sem, const char *str)
{
	if (sem->debug)
		printk("[%d] %s({%d,%d})\n",
		       current->pid, str, sem->activity,
		       list_empty(&sem->wait_list) ? 0 : 1);
}
#endif

/*
 * initialise the semaphore
 */
void init_rwsem(struct rw_semaphore *sem)
{
	sem->activity = 0;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
	sem->debug = 0;
#endif
}
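
/*
 * Typical usage (an illustrative sketch only; callers normally go through
 * the down_read()/up_read()/down_write()/up_write() wrappers declared in
 * <linux/rwsem.h> rather than calling the __-prefixed functions in this
 * file directly, and 'mydata_sem' is just a made-up name):
 *
 *	static struct rw_semaphore mydata_sem;
 *
 *	init_rwsem(&mydata_sem);
 *
 *	down_read(&mydata_sem);
 *	... read the protected data ...
 *	up_read(&mydata_sem);
 *
 *	down_write(&mydata_sem);
 *	... modify the protected data ...
 *	up_write(&mydata_sem);
 */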

/*
 * handle the lock being released whilst there are processes blocked on it
 * that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having flags zeroised
 */
static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	int woken;

	rwsemtrace(sem, "Entering __rwsem_do_wake");

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	/* try to grant a single write lock if there's a writer at the front
	 * of the queue
	 * - we leave the 'waiting count' incremented to signify potential contention
	 */
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
		sem->activity = -1;
		list_del(&waiter->list);
		waiter->flags = 0;
		wake_up_process(waiter->task);
		goto out;
	}

	/* grant an infinite number of read locks to the readers at the front
	 * of the queue */
	woken = 0;
	do {
		list_del(&waiter->list);
		waiter->flags = 0;
		wake_up_process(waiter->task);
		woken++;
		if (list_empty(&sem->wait_list))
			break;
		waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	} while (waiter->flags & RWSEM_WAITING_FOR_READ);

	sem->activity += woken;

 out:
	rwsemtrace(sem, "Leaving __rwsem_do_wake");
	return sem;
}

/*
 * wake a single writer
 */
static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;

	sem->activity = -1;

	/* hand the lock directly to the writer at the front of the queue */
	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	list_del(&waiter->list);

	waiter->flags = 0;
	wake_up_process(waiter->task);
	return sem;
}

/*
 * get a read lock on the semaphore
 */
void __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	rwsemtrace(sem, "Entering __down_read");

	spin_lock(&sem->wait_lock);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		spin_unlock(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_READ;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.flags)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

 out:
	rwsemtrace(sem, "Leaving __down_read");
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int __down_read_trylock(struct rw_semaphore *sem)
{
	int ret = 0;

	rwsemtrace(sem, "Entering __down_read_trylock");

	spin_lock(&sem->wait_lock);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		ret = 1;
	}

	spin_unlock(&sem->wait_lock);

	rwsemtrace(sem, "Leaving __down_read_trylock");
	return ret;
}

/*
 * get a write lock on the semaphore
 * - note that we increment the waiting count anyway to indicate an exclusive lock
 */
void __down_write(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	rwsemtrace(sem, "Entering __down_write");

	spin_lock(&sem->wait_lock);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		spin_unlock(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_WRITE;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.flags)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

 out:
	rwsemtrace(sem, "Leaving __down_write");
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int __down_write_trylock(struct rw_semaphore *sem)
{
	int ret = 0;

	rwsemtrace(sem, "Entering __down_write_trylock");

	spin_lock(&sem->wait_lock);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		ret = 1;
	}

	spin_unlock(&sem->wait_lock);

	rwsemtrace(sem, "Leaving __down_write_trylock");
	return ret;
}

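/*
 * Example (illustrative only; assumes the down_write_trylock() wrapper from
 * <linux/rwsem.h>, which maps onto __down_write_trylock() here):
 *
 *	if (down_write_trylock(&mydata_sem)) {
 *		... modify the protected data ...
 *		up_write(&mydata_sem);
 *	} else {
 *		... could not get the lock without sleeping; defer the work ...
 *	}
 */
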
/*
 * release a read lock on the semaphore
 */
void __up_read(struct rw_semaphore *sem)
{
	rwsemtrace(sem, "Entering __up_read");

	spin_lock(&sem->wait_lock);

	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
		sem = __rwsem_wake_one_writer(sem);

	spin_unlock(&sem->wait_lock);

	rwsemtrace(sem, "Leaving __up_read");
}

/*
 * release a write lock on the semaphore
 */
void __up_write(struct rw_semaphore *sem)
{
	rwsemtrace(sem, "Entering __up_write");

	spin_lock(&sem->wait_lock);

	sem->activity = 0;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem);

	spin_unlock(&sem->wait_lock);

	rwsemtrace(sem, "Leaving __up_write");
}

EXPORT_SYMBOL(init_rwsem);
EXPORT_SYMBOL(__down_read);
EXPORT_SYMBOL(__down_write);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
#if RWSEM_DEBUG
EXPORT_SYMBOL(rwsemtrace);
#endif