FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_umtx.c
1 /*-
2 * Copyright (c) 2004, David Xu <davidxu@freebsd.org>
3 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice unmodified, this list of conditions, and the following
11 * disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 #include <sys/param.h>
32 #include <sys/kernel.h>
33 #include <sys/limits.h>
34 #include <sys/lock.h>
35 #include <sys/malloc.h>
36 #include <sys/mutex.h>
37 #include <sys/proc.h>
38 #include <sys/signalvar.h>
39 #include <sys/sysent.h>
40 #include <sys/systm.h>
41 #include <sys/sysproto.h>
42 #include <sys/sx.h>
43 #include <sys/thr.h>
44 #include <sys/umtx.h>
45
/*
 * Per-(process, umtx) sleep queue.  A umtx_q is allocated the first
 * time a thread in a process blocks on a given userland mutex (see
 * umtx_insert()) and freed when the last blocked thread leaves (see
 * umtx_remove()).  It lives on one of the hash chains in `queues',
 * keyed by the (uq_pid, uq_umtx) pair.
 */
struct umtx_q {
	LIST_ENTRY(umtx_q) uq_next;	/* Linked list for the hash. */
	TAILQ_HEAD(, thread) uq_tdq;	/* List of threads blocked here. */
	struct umtx *uq_umtx;		/* Pointer key component. */
	pid_t uq_pid;			/* Pid key component. */
};
52
53 #define UMTX_QUEUES 128
54 #define UMTX_HASH(pid, umtx) \
55 (((uintptr_t)pid + ((uintptr_t)umtx & ~65535)) % UMTX_QUEUES)
56
57 LIST_HEAD(umtx_head, umtx_q);
58 static struct umtx_head queues[UMTX_QUEUES];
59 static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
60
61 static struct mtx umtx_lock;
62 MTX_SYSINIT(umtx, &umtx_lock, "umtx", MTX_DEF);
63
64 #define UMTX_LOCK() mtx_lock(&umtx_lock);
65 #define UMTX_UNLOCK() mtx_unlock(&umtx_lock);
66
67 #define UMTX_CONTESTED LONG_MIN
68
69 static struct umtx_q *umtx_lookup(struct thread *, struct umtx *umtx);
70 static struct umtx_q *umtx_insert(struct thread *, struct umtx *umtx);
71
72 static struct umtx_q *
73 umtx_lookup(struct thread *td, struct umtx *umtx)
74 {
75 struct umtx_head *head;
76 struct umtx_q *uq;
77 pid_t pid;
78
79 pid = td->td_proc->p_pid;
80
81 head = &queues[UMTX_HASH(td->td_proc->p_pid, umtx)];
82
83 LIST_FOREACH(uq, head, uq_next) {
84 if (uq->uq_pid == pid && uq->uq_umtx == umtx)
85 return (uq);
86 }
87
88 return (NULL);
89 }
90
/*
 * Insert a thread onto the umtx queue.
 *
 * Finds (or allocates) the umtx_q for (td's process, umtx) and appends
 * td to its tail.  Called with the umtx lock held; the lock is dropped
 * around the M_WAITOK allocation, so the lookup is repeated afterwards
 * to close the race with another thread creating the same queue.
 * Returns the queue td was placed on.
 */
static struct umtx_q *
umtx_insert(struct thread *td, struct umtx *umtx)
{
	struct umtx_head *head;
	struct umtx_q *uq;
	pid_t pid;

	pid = td->td_proc->p_pid;

	if ((uq = umtx_lookup(td, umtx)) == NULL) {
		struct umtx_q *ins;

		/*
		 * M_WAITOK may sleep; we must not hold the umtx mutex
		 * across the allocation.
		 */
		UMTX_UNLOCK();
		ins = malloc(sizeof(*uq), M_UMTX, M_ZERO | M_WAITOK);
		UMTX_LOCK();

		/*
		 * Some one else could have succeeded while we were blocked
		 * waiting on memory.
		 */
		if ((uq = umtx_lookup(td, umtx)) == NULL) {
			head = &queues[UMTX_HASH(pid, umtx)];
			uq = ins;
			uq->uq_pid = pid;
			uq->uq_umtx = umtx;
			LIST_INSERT_HEAD(head, uq, uq_next);
			TAILQ_INIT(&uq->uq_tdq);
		} else
			free(ins, M_UMTX);
	}

	/*
	 * Insert us onto the end of the TAILQ.
	 */
	TAILQ_INSERT_TAIL(&uq->uq_tdq, td, td_umtx);

	return (uq);
}
132
133 static void
134 umtx_remove(struct umtx_q *uq, struct thread *td)
135 {
136 TAILQ_REMOVE(&uq->uq_tdq, td, td_umtx);
137
138 if (TAILQ_EMPTY(&uq->uq_tdq)) {
139 LIST_REMOVE(uq, uq_next);
140 free(uq, M_UMTX);
141 }
142 }
143
/*
 * System call: acquire the userland mutex uap->umtx for td.
 *
 * Userland takes an uncontested lock with a compare-and-swap and only
 * enters the kernel on contention.  Here we loop: retry the uncontested
 * acquire, queue ourselves, publish the UMTX_CONTESTED bit, and sleep
 * until the owner wakes us from _umtx_unlock().  Returns 0 on success,
 * EFAULT if the userland address is bad, or the msleep() error if the
 * sleep was interrupted by a signal (PCATCH).
 */
int
_umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
    /* struct umtx *umtx */
{
	struct umtx_q *uq;
	struct umtx *umtx;
	intptr_t owner;
	intptr_t old;
	int error;

	uq = NULL;

	/*
	 * Care must be exercised when dealing with this structure.  It
	 * can fault on any access.
	 */
	umtx = uap->umtx;

	for (;;) {
		/*
		 * Try the uncontested case.  This should be done in userland.
		 */
		owner = casuptr((intptr_t *)&umtx->u_owner,
		    UMTX_UNOWNED, td->td_tid);

		/* The address was invalid. */
		if (owner == -1)
			return (EFAULT);

		/* The acquire succeeded. */
		if (owner == UMTX_UNOWNED)
			return (0);

		/* If no one owns it but it is contested try to acquire it. */
		if (owner == UMTX_CONTESTED) {
			owner = casuptr((intptr_t *)&umtx->u_owner,
			    UMTX_CONTESTED, td->td_tid | UMTX_CONTESTED);

			/* The address was invalid. */
			if (owner == -1)
				return (EFAULT);

			/* We own it now; the contested bit stays set. */
			if (owner == UMTX_CONTESTED)
				return (0);

			/* If this failed the lock has changed, restart. */
			continue;
		}

		/*
		 * The lock is held.  Queue ourselves before publishing
		 * the contested bit so the unlocker can find us.
		 */
		UMTX_LOCK();
		uq = umtx_insert(td, umtx);
		UMTX_UNLOCK();

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock.  If this fails
		 * either some one else has acquired the lock or it has been
		 * released.
		 */
		old = casuptr((intptr_t *)&umtx->u_owner, owner,
		    owner | UMTX_CONTESTED);

		/* The address was invalid. */
		if (old == -1) {
			UMTX_LOCK();
			umtx_remove(uq, td);
			UMTX_UNLOCK();
			return (EFAULT);
		}

		/*
		 * We set the contested bit, sleep.  Otherwise the lock changed
		 * and we need to retry or we lost a race to the thread
		 * unlocking the umtx.  TDF_UMTXWAKEUP is tested under the
		 * proc lock so a wakeup posted between the CAS above and
		 * the msleep() below is not lost.
		 */
		PROC_LOCK(td->td_proc);
		if (old == owner && (td->td_flags & TDF_UMTXWAKEUP) == 0)
			error = msleep(td, &td->td_proc->p_mtx,
			    td->td_priority | PCATCH, "umtx", 0);
		else
			error = 0;
		/* Consume the wakeup flag before the next iteration. */
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_UMTXWAKEUP;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(td->td_proc);

		UMTX_LOCK();
		umtx_remove(uq, td);
		UMTX_UNLOCK();

		/*
		 * If we caught a signal we might have to retry or exit
		 * immediately.
		 */
		if (error)
			return (error);
	}

	return (0);
}
245
/*
 * System call: release the userland mutex uap->umtx acquired via
 * _umtx_lock().  Only the owning thread may unlock (EPERM otherwise),
 * and only a contested lock should reach the kernel (EINVAL otherwise).
 * With zero or one waiter the word is handed back UMTX_UNOWNED; with
 * more waiters it becomes UMTX_CONTESTED so the next locker takes the
 * slow path.  The first waiter, if any, is flagged TDF_UMTXWAKEUP and
 * woken.
 */
int
_umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
    /* struct umtx *umtx */
{
	struct thread *blocked;
	struct umtx *umtx;
	struct umtx_q *uq;
	intptr_t owner;
	intptr_t old;

	umtx = uap->umtx;

	/*
	 * Make sure we own this mtx.
	 *
	 * XXX Need a {fu,su}ptr this is not correct on arch where
	 * sizeof(intptr_t) != sizeof(long).
	 */
	if ((owner = fuword(&umtx->u_owner)) == -1)
		return (EFAULT);

	if ((owner & ~UMTX_CONTESTED) != td->td_tid)
		return (EPERM);

	/* We should only ever be in here for contested locks */
	if ((owner & UMTX_CONTESTED) == 0)
		return (EINVAL);
	blocked = NULL;

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * there is zero or one thread only waiting for it.
	 * Otherwise, it must be marked as contested.
	 */
	UMTX_LOCK();
	uq = umtx_lookup(td, umtx);
	if (uq == NULL ||
	    (uq != NULL && (blocked = TAILQ_FIRST(&uq->uq_tdq)) != NULL &&
	    TAILQ_NEXT(blocked, td_umtx) == NULL)) {
		/* Zero or one waiter: release the word to UMTX_UNOWNED. */
		UMTX_UNLOCK();
		old = casuptr((intptr_t *)&umtx->u_owner, owner,
		    UMTX_UNOWNED);
		if (old == -1)
			return (EFAULT);
		if (old != owner)
			return (EINVAL);

		/*
		 * Recheck the umtx queue to make sure another thread
		 * didn't put itself on it after it was unlocked.
		 */
		UMTX_LOCK();
		uq = umtx_lookup(td, umtx);
		if (uq != NULL &&
		    ((blocked = TAILQ_FIRST(&uq->uq_tdq)) != NULL &&
		    TAILQ_NEXT(blocked, td_umtx) != NULL)) {
			/* New waiters raced in: re-mark as contested. */
			UMTX_UNLOCK();
			old = casuptr((intptr_t *)&umtx->u_owner,
			    UMTX_UNOWNED, UMTX_CONTESTED);
		} else {
			UMTX_UNLOCK();
		}
	} else {
		/*
		 * Multiple waiters: hand the word back unowned-but-
		 * contested so the next acquirer re-enters the kernel.
		 */
		UMTX_UNLOCK();
		old = casuptr((intptr_t *)&umtx->u_owner,
		    owner, UMTX_CONTESTED);
		if (old != -1 && old != owner)
			return (EINVAL);
	}

	if (old == -1)
		return (EFAULT);

	/*
	 * If there is a thread waiting on the umtx, wake it up.
	 */
	if (blocked != NULL) {
		PROC_LOCK(blocked->td_proc);
		mtx_lock_spin(&sched_lock);
		blocked->td_flags |= TDF_UMTXWAKEUP;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(blocked->td_proc);
		wakeup(blocked);
	}

	return (0);
}
Cache object: db0b87327f079f2f64528b39d1e9d0cb
|