FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_umtx.c
1 /*-
2 * Copyright (c) 2004, David Xu <davidxu@freebsd.org>
3 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice unmodified, this list of conditions, and the following
11 * disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD: releng/6.2/sys/kern/kern_umtx.c 164286 2006-11-14 20:42:41Z cvs2svn $");
30
31 #include <sys/param.h>
32 #include <sys/kernel.h>
33 #include <sys/limits.h>
34 #include <sys/lock.h>
35 #include <sys/malloc.h>
36 #include <sys/mutex.h>
37 #include <sys/proc.h>
38 #include <sys/sysent.h>
39 #include <sys/systm.h>
40 #include <sys/sysproto.h>
41 #include <sys/eventhandler.h>
42 #include <sys/thr.h>
43 #include <sys/umtx.h>
44
45 #include <vm/vm.h>
46 #include <vm/vm_param.h>
47 #include <vm/pmap.h>
48 #include <vm/vm_map.h>
49 #include <vm/vm_object.h>
50
51 #define UMTX_PRIVATE 0
52 #define UMTX_SHARED 1
53
54 #define UMTX_STATIC_SHARED
55
/*
 * Key identifying a umtx for hashing and matching.  A umtx backed by
 * a shareable mapping is keyed by (VM object, offset) so every process
 * mapping it resolves to the same key; a private umtx is keyed by its
 * userland address and owning pid.  The "both" member overlays either
 * pair as a generic (pointer, word) tuple for umtxq_hash() and
 * umtx_key_match().
 */
struct umtx_key {
	int	type;			/* UMTX_SHARED or UMTX_PRIVATE. */
	union {
		struct {
			vm_object_t	object;	/* Backing VM object. */
			long		offset;	/* Offset of umtx in object. */
		} shared;
		struct {
			struct umtx	*umtx;	/* Userland address of umtx. */
			long		pid;	/* Owning process id. */
		} private;
		struct {
			void	*ptr;	/* Overlays object / umtx. */
			long	word;	/* Overlays offset / pid. */
		} both;
	} info;
};
73
/*
 * Per-thread sleep-queue entry, created by umtxq_alloc() and linked
 * onto a hash chain while its thread is blocked on a umtx.
 */
struct umtx_q {
	LIST_ENTRY(umtx_q)	uq_next;	/* Linked list for the hash. */
	struct umtx_key		uq_key;		/* Umtx key. */
	struct thread		*uq_thread;	/* The thread waits on. */
	LIST_ENTRY(umtx_q)	uq_rqnext;	/* Linked list for requeuing. */
	vm_offset_t		uq_addr;	/* Umtx's virtual address. */
};
81
/* List head type for a hash chain of sleeping umtx_q entries. */
LIST_HEAD(umtx_head, umtx_q);

/*
 * One bucket of the umtx hash table: a mutex-protected list of
 * waiters plus busy/want flags used to serialize queue operations
 * (see umtxq_busy()/umtxq_unbusy()).
 */
struct umtxq_chain {
	struct mtx	uc_lock;	/* Lock for this chain. */
	struct umtx_head uc_queue;	/* List of sleep queues. */
#define	UCF_BUSY	0x01		/* Chain is being operated on. */
#define	UCF_WANT	0x02		/* A thread sleeps waiting for unbusy. */
	int		uc_flags;
};
90
91 #define GOLDEN_RATIO_PRIME 2654404609U
92 #define UMTX_CHAINS 128
93 #define UMTX_SHIFTS (__WORD_BIT - 7)
94
95 static struct umtxq_chain umtxq_chains[UMTX_CHAINS];
96 static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
97
98 static void umtxq_init_chains(void *);
99 static int umtxq_hash(struct umtx_key *key);
100 static struct mtx *umtxq_mtx(int chain);
101 static void umtxq_lock(struct umtx_key *key);
102 static void umtxq_unlock(struct umtx_key *key);
103 static void umtxq_busy(struct umtx_key *key);
104 static void umtxq_unbusy(struct umtx_key *key);
105 static void umtxq_insert(struct umtx_q *uq);
106 static void umtxq_remove(struct umtx_q *uq);
107 static int umtxq_sleep(struct thread *td, struct umtx_key *key,
108 int prio, const char *wmesg, int timo);
109 static int umtxq_count(struct umtx_key *key);
110 static int umtxq_signal(struct umtx_key *key, int nr_wakeup);
111 #ifdef UMTX_DYNAMIC_SHARED
112 static void fork_handler(void *arg, struct proc *p1, struct proc *p2,
113 int flags);
114 #endif
115 static int umtx_key_match(const struct umtx_key *k1, const struct umtx_key *k2);
116 static int umtx_key_get(struct thread *td, struct umtx *umtx,
117 struct umtx_key *key);
118 static void umtx_key_release(struct umtx_key *key);
119
120 SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_init_chains, NULL);
121
/*
 * Allocate a thread's umtx queue entry.  M_WAITOK means this may
 * sleep but never returns NULL.
 */
struct umtx_q *
umtxq_alloc(void)
{
	return (malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK));
}
127
/* Release a umtx queue entry previously obtained from umtxq_alloc(). */
void
umtxq_free(struct umtx_q *uq)
{
	free(uq, M_UMTX);
}
133
134 static void
135 umtxq_init_chains(void *arg __unused)
136 {
137 int i;
138
139 for (i = 0; i < UMTX_CHAINS; ++i) {
140 mtx_init(&umtxq_chains[i].uc_lock, "umtxq_lock", NULL,
141 MTX_DEF | MTX_DUPOK);
142 LIST_INIT(&umtxq_chains[i].uc_queue);
143 umtxq_chains[i].uc_flags = 0;
144 }
145 #ifdef UMTX_DYNAMIC_SHARED
146 EVENTHANDLER_REGISTER(process_fork, fork_handler, 0, 10000);
147 #endif
148 }
149
150 static inline int
151 umtxq_hash(struct umtx_key *key)
152 {
153 unsigned n = (uintptr_t)key->info.both.ptr + key->info.both.word;
154 return (((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS);
155 }
156
157 static inline int
158 umtx_key_match(const struct umtx_key *k1, const struct umtx_key *k2)
159 {
160 return (k1->type == k2->type &&
161 k1->info.both.ptr == k2->info.both.ptr &&
162 k1->info.both.word == k2->info.both.word);
163 }
164
165 static inline struct mtx *
166 umtxq_mtx(int chain)
167 {
168 return (&umtxq_chains[chain].uc_lock);
169 }
170
/*
 * Mark the chain for "key" busy, sleeping until any current holder
 * clears UCF_BUSY.  The chain lock must be held on entry; msleep()
 * drops and reacquires it, so it is still held on return.
 */
static inline void
umtxq_busy(struct umtx_key *key)
{
	int chain = umtxq_hash(key);

	mtx_assert(umtxq_mtx(chain), MA_OWNED);
	while (umtxq_chains[chain].uc_flags & UCF_BUSY) {
		/* Record a waiter so umtxq_unbusy() knows to wakeup(). */
		umtxq_chains[chain].uc_flags |= UCF_WANT;
		msleep(&umtxq_chains[chain], umtxq_mtx(chain),
		    0, "umtxq_busy", 0);
	}
	umtxq_chains[chain].uc_flags |= UCF_BUSY;
}
184
/*
 * Clear the chain's busy flag and wake any threads that set UCF_WANT
 * while waiting in umtxq_busy().  Chain lock must be held.
 */
static inline void
umtxq_unbusy(struct umtx_key *key)
{
	int chain = umtxq_hash(key);

	mtx_assert(umtxq_mtx(chain), MA_OWNED);
	KASSERT(umtxq_chains[chain].uc_flags & UCF_BUSY, ("not busy"));
	umtxq_chains[chain].uc_flags &= ~UCF_BUSY;
	if (umtxq_chains[chain].uc_flags & UCF_WANT) {
		umtxq_chains[chain].uc_flags &= ~UCF_WANT;
		wakeup(&umtxq_chains[chain]);
	}
}
198
/* Acquire the chain mutex for the chain this key hashes to. */
static inline void
umtxq_lock(struct umtx_key *key)
{
	mtx_lock(umtxq_mtx(umtxq_hash(key)));
}
205
/* Release the chain mutex for the chain this key hashes to. */
static inline void
umtxq_unlock(struct umtx_key *key)
{
	mtx_unlock(umtxq_mtx(umtxq_hash(key)));
}
212
/*
 * Insert a thread onto the umtx queue for its key's chain.  The
 * chain lock must be held.  TDF_UMTXQ is toggled under sched_lock,
 * which is what protects td_flags.
 */
static inline void
umtxq_insert(struct umtx_q *uq)
{
	struct umtx_head *head;
	int chain = umtxq_hash(&uq->uq_key);

	mtx_assert(umtxq_mtx(chain), MA_OWNED);
	head = &umtxq_chains[chain].uc_queue;
	LIST_INSERT_HEAD(head, uq, uq_next);
	mtx_lock_spin(&sched_lock);
	uq->uq_thread->td_flags |= TDF_UMTXQ;
	mtx_unlock_spin(&sched_lock);
}
229
/*
 * Remove thread from the umtx queue.  A clear TDF_UMTXQ means the
 * entry was already dequeued (e.g. by umtxq_signal()), so this is
 * safe to call unconditionally under the chain lock.
 */
static inline void
umtxq_remove(struct umtx_q *uq)
{
	mtx_assert(umtxq_mtx(umtxq_hash(&uq->uq_key)), MA_OWNED);
	if (uq->uq_thread->td_flags & TDF_UMTXQ) {
		LIST_REMOVE(uq, uq_next);
		/* turning off TDF_UMTXQ should be the last thing. */
		mtx_lock_spin(&sched_lock);
		uq->uq_thread->td_flags &= ~TDF_UMTXQ;
		mtx_unlock_spin(&sched_lock);
	}
}
245
246 static int
247 umtxq_count(struct umtx_key *key)
248 {
249 struct umtx_q *uq;
250 struct umtx_head *head;
251 int chain, count = 0;
252
253 chain = umtxq_hash(key);
254 mtx_assert(umtxq_mtx(chain), MA_OWNED);
255 head = &umtxq_chains[chain].uc_queue;
256 LIST_FOREACH(uq, head, uq_next) {
257 if (umtx_key_match(&uq->uq_key, key)) {
258 if (++count > 1)
259 break;
260 }
261 }
262 return (count);
263 }
264
265 static int
266 umtxq_signal(struct umtx_key *key, int n_wake)
267 {
268 struct umtx_q *uq, *next;
269 struct umtx_head *head;
270 struct thread *blocked = NULL;
271 int chain, ret;
272
273 ret = 0;
274 chain = umtxq_hash(key);
275 mtx_assert(umtxq_mtx(chain), MA_OWNED);
276 head = &umtxq_chains[chain].uc_queue;
277 for (uq = LIST_FIRST(head); uq; uq = next) {
278 next = LIST_NEXT(uq, uq_next);
279 if (umtx_key_match(&uq->uq_key, key)) {
280 blocked = uq->uq_thread;
281 umtxq_remove(uq);
282 wakeup(blocked);
283 if (++ret >= n_wake)
284 break;
285 }
286 }
287 return (ret);
288 }
289
290 static inline int
291 umtxq_sleep(struct thread *td, struct umtx_key *key, int priority,
292 const char *wmesg, int timo)
293 {
294 int chain = umtxq_hash(key);
295 int error = msleep(td, umtxq_mtx(chain), priority, wmesg, timo);
296 if (error == EWOULDBLOCK)
297 error = ETIMEDOUT;
298 return (error);
299 }
300
/*
 * Build the key for "umtx" in the current process.  Under
 * UMTX_STATIC_SHARED (the variant compiled here), the address is
 * looked up in the process map: a VM_INHERIT_SHARE mapping yields a
 * shared (object, offset) key with an object reference the caller
 * must drop via umtx_key_release(); anything else yields a private
 * (address, pid) key.  Returns 0 or EFAULT if the lookup fails.
 */
static int
umtx_key_get(struct thread *td, struct umtx *umtx, struct umtx_key *key)
{
#if defined(UMTX_DYNAMIC_SHARED) || defined(UMTX_STATIC_SHARED)
	vm_map_t map;
	vm_map_entry_t entry;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;

	map = &td->td_proc->p_vmspace->vm_map;
	/* Lookup fills key->info.shared.object with the backing object. */
	if (vm_map_lookup(&map, (vm_offset_t)umtx, VM_PROT_WRITE,
	    &entry, &key->info.shared.object, &pindex, &prot,
	    &wired) != KERN_SUCCESS) {
		return EFAULT;
	}
#endif

#if defined(UMTX_DYNAMIC_SHARED)
	key->type = UMTX_SHARED;
	key->info.shared.offset = entry->offset + entry->start -
	    (vm_offset_t)umtx;
	/*
	 * Add object reference, if we don't do this, a buggy application
	 * deallocates the object, the object will be reused by other
	 * applications, then unlock will wake wrong thread.
	 */
	vm_object_reference(key->info.shared.object);
	vm_map_lookup_done(map, entry);
#elif defined(UMTX_STATIC_SHARED)
	if (VM_INHERIT_SHARE == entry->inheritance) {
		key->type = UMTX_SHARED;
		key->info.shared.offset = entry->offset + entry->start -
		    (vm_offset_t)umtx;
		vm_object_reference(key->info.shared.object);
	} else {
		key->type = UMTX_PRIVATE;
		key->info.private.umtx = umtx;
		key->info.private.pid = td->td_proc->p_pid;
	}
	vm_map_lookup_done(map, entry);
#else
	/* No sharing support compiled in: every umtx is private. */
	key->type = UMTX_PRIVATE;
	key->info.private.umtx = umtx;
	key->info.private.pid = td->td_proc->p_pid;
#endif
	return (0);
}
349
350 static inline void
351 umtx_key_release(struct umtx_key *key)
352 {
353 if (key->type == UMTX_SHARED)
354 vm_object_deallocate(key->info.shared.object);
355 }
356
/*
 * Resolve the key for "umtx" and insert the thread's queue entry on
 * the matching hash chain, ready to sleep.  On success (0) the caller
 * holds a key reference that must be dropped with umtx_key_release().
 */
static inline int
umtxq_queue_me(struct thread *td, struct umtx *umtx, struct umtx_q *uq)
{
	int error;

	if ((error = umtx_key_get(td, umtx, &uq->uq_key)) != 0)
		return (error);

	uq->uq_addr = (vm_offset_t)umtx;
	uq->uq_thread = td;
	umtxq_lock(&uq->uq_key);
	/* hmm, for condition variable, we don't need busy flag. */
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unbusy(&uq->uq_key);
	umtxq_unlock(&uq->uq_key);
	return (0);
}
375
#if defined(UMTX_DYNAMIC_SHARED)
/*
 * process_fork event handler: re-resolve the shared keys of p1's
 * threads blocked on umtxes, presumably because forking can change
 * the VM object backing a shared mapping — TODO confirm.  Each waiter
 * whose key no longer matches is moved to its new key's chain.
 * Only compiled under UMTX_DYNAMIC_SHARED; this file defines
 * UMTX_STATIC_SHARED instead, so this code is normally compiled out.
 */
static void
fork_handler(void *arg, struct proc *p1, struct proc *p2, int flags)
{
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	struct umtx_key key;
	LIST_HEAD(, umtx_q) workq;
	struct umtx_q *uq;
	struct thread *td;
	int onq;

	LIST_INIT(&workq);

	/* Collect threads waiting on umtxq */
	PROC_LOCK(p1);
	FOREACH_THREAD_IN_PROC(p1, td) {
		if (td->td_flags & TDF_UMTXQ) {
			uq = td->td_umtxq;
			if (uq)
				LIST_INSERT_HEAD(&workq, uq, uq_rqnext);
		}
	}
	PROC_UNLOCK(p1);

	LIST_FOREACH(uq, &workq, uq_rqnext) {
		/* Recompute the key the waiter's address now maps to. */
		map = &p1->p_vmspace->vm_map;
		if (vm_map_lookup(&map, uq->uq_addr, VM_PROT_WRITE,
		    &entry, &object, &pindex, &prot, &wired) != KERN_SUCCESS) {
			continue;
		}
		key.type = UMTX_SHARED;
		key.info.shared.object = object;
		key.info.shared.offset = entry->offset + entry->start -
		    uq->uq_addr;
		/* Key unchanged: nothing to requeue for this waiter. */
		if (umtx_key_match(&key, &uq->uq_key)) {
			vm_map_lookup_done(map, entry);
			continue;
		}

		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		if (uq->uq_thread->td_flags & TDF_UMTXQ) {
			umtxq_remove(uq);
			onq = 1;
		} else
			onq = 0;
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);
		if (onq) {
			/* Swap in the new key and requeue on its chain. */
			vm_object_deallocate(uq->uq_key.info.shared.object);
			uq->uq_key = key;
			umtxq_lock(&uq->uq_key);
			umtxq_busy(&uq->uq_key);
			umtxq_insert(uq);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			/*
			 * NOTE(review): the new object is referenced only
			 * after the entry is queued under the new key;
			 * verify there is no window where it can be freed.
			 */
			vm_object_reference(uq->uq_key.info.shared.object);
		}
		vm_map_lookup_done(map, entry);
	}
}
#endif
443
/*
 * Acquire the user mutex "umtx" for owner "id", sleeping up to "timo"
 * ticks per attempt (0 = no timeout).  The userland u_owner word is
 * only touched via casuptr() because any access can fault.  Returns 0
 * on acquisition, EFAULT on a bad address, or the sleep error
 * (EINTR/ERESTART/ETIMEDOUT) once a post-wakeup retry fails.
 */
static int
_do_lock(struct thread *td, struct umtx *umtx, long id, int timo)
{
	struct umtx_q *uq;
	intptr_t owner;
	intptr_t old;
	int error = 0;

	uq = td->td_umtxq;
	/*
	 * Care must be exercised when dealing with umtx structure. It
	 * can fault on any access.
	 */

	for (;;) {
		/*
		 * Try the uncontested case. This should be done in userland.
		 */
		owner = casuptr((intptr_t *)&umtx->u_owner,
		    UMTX_UNOWNED, id);

		/* The acquire succeeded. */
		if (owner == UMTX_UNOWNED)
			return (0);

		/* The address was invalid. */
		if (owner == -1)
			return (EFAULT);

		/* If no one owns it but it is contested try to acquire it. */
		if (owner == UMTX_CONTESTED) {
			/* Keep the contested bit so waiters still get woken. */
			owner = casuptr((intptr_t *)&umtx->u_owner,
			    UMTX_CONTESTED, id | UMTX_CONTESTED);

			if (owner == UMTX_CONTESTED)
				return (0);

			/* The address was invalid. */
			if (owner == -1)
				return (EFAULT);

			/* If this failed the lock has changed, restart. */
			continue;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.  (error carries over from the
		 * previous iteration's umtxq_sleep().)
		 */
		if (error || (error = umtxq_queue_me(td, umtx, uq)) != 0)
			return (error);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock. If this fails
		 * either some one else has acquired the lock or it has been
		 * released.
		 */
		old = casuptr((intptr_t *)&umtx->u_owner, owner,
		    owner | UMTX_CONTESTED);

		/* The address was invalid. */
		if (old == -1) {
			/* Undo the queueing done above before failing. */
			umtxq_lock(&uq->uq_key);
			umtxq_busy(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}

		/*
		 * We set the contested bit, sleep. Otherwise the lock changed
		 * and we need to retry or we lost a race to the thread
		 * unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		if (old == owner && (td->td_flags & TDF_UMTXQ)) {
			error = umtxq_sleep(td, &uq->uq_key, PCATCH,
			    "umtx", timo);
		}
		/* Dequeue (no-op if a wakeup already removed us) and retry. */
		umtxq_busy(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);
	}

	/* Not reached: the loop only exits via return. */
	return (0);
}
535
/*
 * Lock a umtx with an optional relative timeout.  The timeout is
 * converted to an absolute uptime deadline so _do_lock() can be
 * retried with the remaining time after each premature timeout.
 */
static int
do_lock(struct thread *td, struct umtx *umtx, long id,
	struct timespec *timeout)
{
	struct timespec ts, ts2, ts3;
	struct timeval tv;
	int error;

	if (timeout == NULL) {
		error = _do_lock(td, umtx, id, 0);
	} else {
		/* Deadline = current uptime + relative timeout. */
		getnanouptime(&ts);
		timespecadd(&ts, timeout);
		TIMESPEC_TO_TIMEVAL(&tv, timeout);
		for (;;) {
			error = _do_lock(td, umtx, id, tvtohz(&tv));
			if (error != ETIMEDOUT)
				break;
			/* Timed out this round: recompute remaining time. */
			getnanouptime(&ts2);
			if (timespeccmp(&ts2, &ts, >=)) {
				error = ETIMEDOUT;
				break;
			}
			ts3 = ts;
			timespecsub(&ts3, &ts2);
			TIMESPEC_TO_TIMEVAL(&tv, &ts3);
		}
	}
	/*
	 * This lets userland back off critical region if needed.
	 */
	if (error == ERESTART)
		error = EINTR;
	return (error);
}
571
/*
 * Unlock a contested umtx owned by "id": hand the u_owner word back
 * to UMTX_UNOWNED (or UMTX_CONTESTED if more than one waiter remains)
 * and wake a waiter.  Returns 0, EFAULT on a bad address, EPERM if
 * the caller is not the owner, or EINVAL if the lock is uncontested
 * or the owner word changed under us.
 */
static int
do_unlock(struct thread *td, struct umtx *umtx, long id)
{
	struct umtx_key key;
	intptr_t owner;
	intptr_t old;
	int error;
	int count;

	/*
	 * Make sure we own this mtx.
	 *
	 * XXX Need a {fu,su}ptr this is not correct on arch where
	 * sizeof(intptr_t) != sizeof(long).
	 */
	if ((owner = fuword(&umtx->u_owner)) == -1)
		return (EFAULT);

	if ((owner & ~UMTX_CONTESTED) != id)
		return (EPERM);

	/* We should only ever be in here for contested locks */
	if ((owner & UMTX_CONTESTED) == 0)
		return (EINVAL);

	if ((error = umtx_key_get(td, umtx, &key)) != 0)
		return (error);

	/* Busy the chain so no new waiter races the handoff below. */
	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * there is zero or one thread only waiting for it.
	 * Otherwise, it must be marked as contested.
	 */
	old = casuptr((intptr_t *)&umtx->u_owner, owner,
	    count <= 1 ? UMTX_UNOWNED : UMTX_CONTESTED);
	umtxq_lock(&key);
	/* Wake one waiter (a count of 0 stops umtxq_signal after one). */
	umtxq_signal(&key, 0);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	if (old == -1)
		return (EFAULT);
	if (old != owner)
		return (EINVAL);
	return (0);
}
623
/*
 * Condition-variable style wait: sleep until the umtx's u_owner word
 * no longer equals "id" and a wake is delivered, or the optional
 * relative timeout expires.  If the word already differs from "id"
 * the wait returns immediately with 0.
 */
static int
do_wait(struct thread *td, struct umtx *umtx, long id, struct timespec *timeout)
{
	struct umtx_q *uq;
	struct timespec ts, ts2, ts3;
	struct timeval tv;
	long tmp;
	int error = 0;

	uq = td->td_umtxq;
	/* Queue first, then re-check the word, to avoid a lost wakeup. */
	if ((error = umtxq_queue_me(td, umtx, uq)) != 0)
		return (error);
	tmp = fuword(&umtx->u_owner);
	if (tmp != id) {
		/* Value already changed: dequeue and return success. */
		umtxq_lock(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
	} else if (timeout == NULL) {
		umtxq_lock(&uq->uq_key);
		if (td->td_flags & TDF_UMTXQ)
			error = umtxq_sleep(td, &uq->uq_key,
			    PCATCH, "ucond", 0);
		/* A cleared TDF_UMTXQ means we were properly woken. */
		if (!(td->td_flags & TDF_UMTXQ))
			error = 0;
		else
			umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
	} else {
		/* Deadline = current uptime + relative timeout. */
		getnanouptime(&ts);
		timespecadd(&ts, timeout);
		TIMESPEC_TO_TIMEVAL(&tv, timeout);
		for (;;) {
			umtxq_lock(&uq->uq_key);
			if (td->td_flags & TDF_UMTXQ) {
				error = umtxq_sleep(td, &uq->uq_key, PCATCH,
				    "ucond", tvtohz(&tv));
			}
			if (!(td->td_flags & TDF_UMTXQ)) {
				/* Woken by umtxq_signal(): success. */
				umtxq_unlock(&uq->uq_key);
				goto out;
			}
			umtxq_unlock(&uq->uq_key);
			if (error != ETIMEDOUT)
				break;
			/* Timed out this round: recompute remaining time. */
			getnanouptime(&ts2);
			if (timespeccmp(&ts2, &ts, >=)) {
				error = ETIMEDOUT;
				break;
			}
			ts3 = ts;
			timespecsub(&ts3, &ts2);
			TIMESPEC_TO_TIMEVAL(&tv, &ts3);
		}
		umtxq_lock(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
	}
out:
	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}
687
/*
 * Wake up to n_wake threads sleeping on the umtx at uaddr.  Returns
 * 0 on success or the umtx_key_get() error.  NOTE(review): the count
 * of threads actually woken (umtxq_signal's return, stored in ret)
 * is discarded; success always reports 0 to the caller.
 */
int
kern_umtx_wake(struct thread *td, void *uaddr, int n_wake)
{
	struct umtx_key key;
	int ret;

	if ((ret = umtx_key_get(td, uaddr, &key)) != 0)
		return (ret);
	umtxq_lock(&key);
	ret = umtxq_signal(&key, n_wake);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	return (0);
}
702
/*
 * System call: lock the umtx using the calling thread's tid as the
 * owner id, blocking with no timeout.
 */
int
_umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
    /* struct umtx *umtx */
{
	return _do_lock(td, uap->umtx, td->td_tid, 0);
}
709
/*
 * System call: unlock the umtx owned by the calling thread (matched
 * against its tid).
 */
int
_umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
    /* struct umtx *umtx */
{
	return do_unlock(td, uap->umtx, td->td_tid);
}
716
717 int
718 _umtx_op(struct thread *td, struct _umtx_op_args *uap)
719 {
720 struct timespec timeout;
721 struct timespec *ts;
722 int error;
723
724 switch(uap->op) {
725 case UMTX_OP_LOCK:
726 /* Allow a null timespec (wait forever). */
727 if (uap->uaddr2 == NULL)
728 ts = NULL;
729 else {
730 error = copyin(uap->uaddr2, &timeout, sizeof(timeout));
731 if (error != 0)
732 break;
733 if (timeout.tv_nsec >= 1000000000 ||
734 timeout.tv_nsec < 0) {
735 error = EINVAL;
736 break;
737 }
738 ts = &timeout;
739 }
740 error = do_lock(td, uap->umtx, uap->id, ts);
741 break;
742 case UMTX_OP_UNLOCK:
743 error = do_unlock(td, uap->umtx, uap->id);
744 break;
745 case UMTX_OP_WAIT:
746 /* Allow a null timespec (wait forever). */
747 if (uap->uaddr2 == NULL)
748 ts = NULL;
749 else {
750 error = copyin(uap->uaddr2, &timeout, sizeof(timeout));
751 if (error != 0)
752 break;
753 if (timeout.tv_nsec >= 1000000000 ||
754 timeout.tv_nsec < 0) {
755 error = EINVAL;
756 break;
757 }
758 ts = &timeout;
759 }
760 error = do_wait(td, uap->umtx, uap->id, ts);
761 break;
762 case UMTX_OP_WAKE:
763 error = kern_umtx_wake(td, uap->umtx, uap->id);
764 break;
765 default:
766 error = EINVAL;
767 break;
768 }
769 return (error);
770 }
Cache object: 4a0f9bde7e465a0b98e92a8975e1f7fe
|