sys/lib/rwsem.c
/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/module.h>

struct rwsem_waiter {
        struct list_head        list;
        struct task_struct      *task;
        unsigned int            flags;
#define RWSEM_WAITING_FOR_READ  0x00000001
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};

#if RWSEM_DEBUG
#undef rwsemtrace
void rwsemtrace(struct rw_semaphore *sem, const char *str)
{
        printk("sem=%p\n", sem);
        printk("(sem)=%08lx\n", sem->count);
        if (sem->debug)
                printk("[%d] %s({%08lx})\n", current->pid, str, sem->count);
}
#endif
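
/* Illustrative note (an addition, not part of the original file): the
 * arithmetic in this file assumes a count layout like the i386 one in
 * <asm/rwsem.h> of this era:
 *
 *      RWSEM_UNLOCKED_VALUE     0x00000000
 *      RWSEM_ACTIVE_BIAS        0x00000001     one active reader or writer
 *      RWSEM_ACTIVE_MASK        0x0000ffff     the 'active part'
 *      RWSEM_WAITING_BIAS      -0x00010000     one queued waiter
 *
 * The low 16 bits count active holders; each queued waiter drives the high
 * bits one 0x00010000 step more negative.  A reader's fast path adds
 * RWSEM_ACTIVE_READ_BIAS (== RWSEM_ACTIVE_BIAS); a writer's adds
 * RWSEM_ACTIVE_WRITE_BIAS (== RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS), so a
 * held write lock registers in both parts of the count at once.
 */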

/*
 * handle the lock being released whilst there are processes blocked on it
 * that can now run
 * - if we come here, then:
 *   - the 'active part' of the count (&0x0000ffff) reached zero but has been
 *     re-incremented
 *   - the 'waiting part' of the count (&0xffff0000) is negative (and will
 *     still be so)
 *   - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having flags zeroised
 */
static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
{
        struct rwsem_waiter *waiter;
        struct list_head *next;
        signed long oldcount;
        int woken, loop;

        rwsemtrace(sem, "Entering __rwsem_do_wake");

        /* only wake someone up if we can transition the active part of the
         * count from 0 -> 1 */
 try_again:
        oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem) - RWSEM_ACTIVE_BIAS;
        if (oldcount & RWSEM_ACTIVE_MASK)
                goto undo;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

        /* try to grant a single write lock if there's a writer at the front
         * of the queue
         * - note we leave the 'active part' of the count incremented by 1
         *   and the waiting part incremented by 0x00010000
         */
        if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
                goto readers_only;

        list_del(&waiter->list);
        waiter->flags = 0;
        wake_up_process(waiter->task);
        goto out;

        /* grant an infinite number of read locks to the readers at the front
         * of the queue
         * - note we increment the 'active part' of the count by the number
         *   of readers (less one for the activity decrement we've already
         *   done) before waking any processes up
         */
 readers_only:
        woken = 0;
        do {
                woken++;

                if (waiter->list.next == &sem->wait_list)
                        break;

                waiter = list_entry(waiter->list.next, struct rwsem_waiter, list);

        } while (waiter->flags & RWSEM_WAITING_FOR_READ);

        loop = woken;
        woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
        woken -= RWSEM_ACTIVE_BIAS;
        rwsem_atomic_add(woken, sem);

        next = sem->wait_list.next;
        for (; loop > 0; loop--) {
                waiter = list_entry(next, struct rwsem_waiter, list);
                next = waiter->list.next;
                waiter->flags = 0;
                wake_up_process(waiter->task);
        }

        sem->wait_list.next = next;
        next->prev = &sem->wait_list;

 out:
        rwsemtrace(sem, "Leaving __rwsem_do_wake");
        return sem;

        /* undo the change to count, but check for a transition 1 -> 0 */
 undo:
        if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
                goto out;
        goto try_again;
}
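
/* Worked example (an addition, assuming the i386 bias values noted above):
 * suppose three readers are woken, so the loop above leaves woken == 3:
 *
 *      woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;  woken == 3 * 0x00010001 == 0x00030003
 *      woken -= RWSEM_ACTIVE_BIAS;                       woken == 0x00030002
 *
 * Together with the RWSEM_ACTIVE_BIAS already added at try_again, the count
 * moves by 0x00030003 in total: the active part rises by 3 (one per woken
 * reader) and the waiting part rises by 3 * 0x00010000, cancelling the
 * RWSEM_WAITING_BIAS each of those waiters contributed when it queued.
 */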

/*
 * wait for a lock to be granted
 */
static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore *sem,
                                                            struct rwsem_waiter *waiter,
                                                            signed long adjustment)
{
        struct task_struct *tsk = current;
        signed long count;

        set_task_state(tsk, TASK_UNINTERRUPTIBLE);

        /* set up my own style of waitqueue */
        spin_lock(&sem->wait_lock);
        waiter->task = tsk;

        list_add_tail(&waiter->list, &sem->wait_list);

        /* note that we're now waiting on the lock, but no longer actively
         * read-locking */
        count = rwsem_atomic_update(adjustment, sem);

        /* if there are no longer active locks, wake the front queued
         * process(es) up
         * - it might even be this process, since the waker takes a more
         *   active part
         */
        if (!(count & RWSEM_ACTIVE_MASK))
                sem = __rwsem_do_wake(sem);

        spin_unlock(&sem->wait_lock);

        /* wait to be given the lock */
        for (;;) {
                if (!waiter->flags)
                        break;
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }

        tsk->state = TASK_RUNNING;

        return sem;
}
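
/* Sketch (an addition, not part of the original file): how the inline fast
 * paths in <asm/rwsem.h> hand off to this slow path.  The real i386 version
 * is inline assembly; in C it is roughly:
 */
#if 0   /* illustrative only, not compiled */
static inline void __down_read_sketch(struct rw_semaphore *sem)
{
        /* optimistically bump the active part of the count */
        if (rwsem_atomic_update(RWSEM_ACTIVE_READ_BIAS, sem) < 0)
                /* result went negative: a writer holds or awaits the lock,
                 * so queue ourselves and sleep */
                rwsem_down_read_failed(sem);
}
#endif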

/*
 * wait for the read lock to be granted
 */
struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;

        rwsemtrace(sem, "Entering rwsem_down_read_failed");

        waiter.flags = RWSEM_WAITING_FOR_READ;
        rwsem_down_failed_common(sem, &waiter, RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);

        rwsemtrace(sem, "Leaving rwsem_down_read_failed");
        return sem;
}
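
/* Note (an addition): the adjustment RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS
 * (== -0x00010001 with the i386 values) both undoes the +RWSEM_ACTIVE_BIAS
 * the failed fast path added and records this task as one queued waiter, in
 * a single atomic step.
 */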

/*
 * wait for the write lock to be granted
 */
struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;

        rwsemtrace(sem, "Entering rwsem_down_write_failed");

        waiter.flags = RWSEM_WAITING_FOR_WRITE;
        rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);

        rwsemtrace(sem, "Leaving rwsem_down_write_failed");
        return sem;
}
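
/* Note (an addition): a writer's failed fast path has already added
 * RWSEM_ACTIVE_WRITE_BIAS (== -0x0000ffff with the i386 values), i.e. active
 * part +1 and waiting part one step more negative.  The -RWSEM_ACTIVE_BIAS
 * adjustment here therefore only surrenders the active count; the waiting
 * bias stays in place while the writer sleeps on the queue.
 */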

/*
 * handle waking up a waiter on the semaphore
 * - up_read has decremented the active part of the count if we come here
 */
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
        rwsemtrace(sem, "Entering rwsem_wake");

        spin_lock(&sem->wait_lock);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem);

        spin_unlock(&sem->wait_lock);

        rwsemtrace(sem, "Leaving rwsem_wake");

        return sem;
}
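
/* Sketch (an addition, not part of the original file): the unlock fast paths
 * call rwsem_wake() when a decrement leaves queued waiters behind.  The real
 * i386 __up_read() is inline assembly that also checks it was the last
 * active holder before calling here; simplified to C, roughly:
 */
#if 0   /* illustrative only, not compiled */
static inline void __up_read_sketch(struct rw_semaphore *sem)
{
        /* drop our active count; a negative result means waiters remain */
        if (rwsem_atomic_update(-RWSEM_ACTIVE_READ_BIAS, sem) < 0)
                rwsem_wake(sem);        /* may now be able to grant the lock */
}
#endif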

EXPORT_SYMBOL_NOVERS(rwsem_down_read_failed);
EXPORT_SYMBOL_NOVERS(rwsem_down_write_failed);
EXPORT_SYMBOL_NOVERS(rwsem_wake);
#if RWSEM_DEBUG
EXPORT_SYMBOL(rwsemtrace);
#endif