FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_umtx.c
1 /*-
2 * Copyright (c) 2004, David Xu <davidxu@freebsd.org>
3 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice unmodified, this list of conditions, and the following
11 * disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD: releng/6.3/sys/kern/kern_umtx.c 173886 2007-11-24 19:45:58Z cvs2svn $");
30
31 #include "opt_compat.h"
32 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/limits.h>
35 #include <sys/lock.h>
36 #include <sys/malloc.h>
37 #include <sys/mutex.h>
38 #include <sys/proc.h>
39 #include <sys/sysent.h>
40 #include <sys/systm.h>
41 #include <sys/sysproto.h>
42 #include <sys/eventhandler.h>
43 #include <sys/thr.h>
44 #include <sys/umtx.h>
45
46 #include <vm/vm.h>
47 #include <vm/vm_param.h>
48 #include <vm/pmap.h>
49 #include <vm/vm_map.h>
50 #include <vm/vm_object.h>
51
52 #ifdef COMPAT_IA32
53 #include <compat/freebsd32/freebsd32_proto.h>
54
55 #define UMTX_CONTESTED32 (-0x7fffffff - 1)
56 #endif
57
58 #define UMTX_PRIVATE 0
59 #define UMTX_SHARED 1
60
61 #define UMTX_STATIC_SHARED
62
/*
 * Key identifying a umtx for hashing and matching.  A shared umtx is
 * named by its backing VM object and offset; a private one by its user
 * virtual address and owning pid.  The "both" view overlays the other
 * two members so hashing/comparison need not know which variant is in
 * use.
 */
struct umtx_key {
	int	type;			/* UMTX_SHARED or UMTX_PRIVATE */
	union {
		struct {
			vm_object_t	object;	/* VM object backing the umtx */
			long		offset;	/* location within the object */
		} shared;
		struct {
			struct umtx	*umtx;	/* user address of the umtx */
			long		pid;	/* owning process id */
		} private;
		struct {
			void	*ptr;	/* overlays object / umtx */
			long	word;	/* overlays offset / pid */
		} both;
	} info;
};
80
/*
 * Per-thread wait record, reached via td->td_umtxq and linked onto a
 * hash chain while the thread sleeps on a umtx.  (Presumably allocated
 * at thread creation via umtxq_alloc() -- the allocation site is not
 * in this file.)
 */
struct umtx_q {
	LIST_ENTRY(umtx_q)	uq_next;	/* Linked list for the hash. */
	struct umtx_key		uq_key;		/* Umtx key. */
	struct thread		*uq_thread;	/* The thread waits on. */
	LIST_ENTRY(umtx_q)	uq_rqnext;	/* Linked list for requeuing. */
	vm_offset_t		uq_addr;	/* Umtx's virtual address. */
};
88
LIST_HEAD(umtx_head, umtx_q);
/*
 * One hash bucket: its mutex, the list of waiters hashing here, and a
 * busy/want flag pair used to serialize multi-step queue operations
 * that must survive a drop of the chain mutex (see umtxq_busy()).
 */
struct umtxq_chain {
	struct mtx	uc_lock;	/* Lock for this chain. */
	struct umtx_head uc_queue;	/* List of sleep queues. */
#define	UCF_BUSY	0x01
#define	UCF_WANT	0x02
	int		uc_flags;
};
97
98 #define GOLDEN_RATIO_PRIME 2654404609U
99 #define UMTX_CHAINS 128
100 #define UMTX_SHIFTS (__WORD_BIT - 7)
101
102 static struct umtxq_chain umtxq_chains[UMTX_CHAINS];
103 static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
104
105 static void umtxq_init_chains(void *);
106 static int umtxq_hash(struct umtx_key *key);
107 static struct mtx *umtxq_mtx(int chain);
108 static void umtxq_lock(struct umtx_key *key);
109 static void umtxq_unlock(struct umtx_key *key);
110 static void umtxq_busy(struct umtx_key *key);
111 static void umtxq_unbusy(struct umtx_key *key);
112 static void umtxq_insert(struct umtx_q *uq);
113 static void umtxq_remove(struct umtx_q *uq);
114 static int umtxq_sleep(struct thread *td, struct umtx_key *key,
115 int prio, const char *wmesg, int timo);
116 static int umtxq_count(struct umtx_key *key);
117 static int umtxq_signal(struct umtx_key *key, int nr_wakeup);
118 #ifdef UMTX_DYNAMIC_SHARED
119 static void fork_handler(void *arg, struct proc *p1, struct proc *p2,
120 int flags);
121 #endif
122 static int umtx_key_match(const struct umtx_key *k1, const struct umtx_key *k2);
123 static int umtx_key_get(struct thread *td, void *umtx,
124 struct umtx_key *key);
125 static void umtx_key_release(struct umtx_key *key);
126
127 SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_init_chains, NULL);
128
129 struct umtx_q *
130 umtxq_alloc(void)
131 {
132 return (malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK));
133 }
134
/*
 * Release a umtx_q previously obtained from umtxq_alloc().
 */
void
umtxq_free(struct umtx_q *uq)
{
	free(uq, M_UMTX);
}
140
141 static void
142 umtxq_init_chains(void *arg __unused)
143 {
144 int i;
145
146 for (i = 0; i < UMTX_CHAINS; ++i) {
147 mtx_init(&umtxq_chains[i].uc_lock, "umtxq_lock", NULL,
148 MTX_DEF | MTX_DUPOK);
149 LIST_INIT(&umtxq_chains[i].uc_queue);
150 umtxq_chains[i].uc_flags = 0;
151 }
152 #ifdef UMTX_DYNAMIC_SHARED
153 EVENTHANDLER_REGISTER(process_fork, fork_handler, 0, 10000);
154 #endif
155 }
156
157 static inline int
158 umtxq_hash(struct umtx_key *key)
159 {
160 unsigned n = (uintptr_t)key->info.both.ptr + key->info.both.word;
161 return (((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS);
162 }
163
164 static inline int
165 umtx_key_match(const struct umtx_key *k1, const struct umtx_key *k2)
166 {
167 return (k1->type == k2->type &&
168 k1->info.both.ptr == k2->info.both.ptr &&
169 k1->info.both.word == k2->info.both.word);
170 }
171
/*
 * Return the mutex protecting the given hash chain.
 */
static inline struct mtx *
umtxq_mtx(int chain)
{
	return (&umtxq_chains[chain].uc_lock);
}
177
/*
 * Mark the key's chain busy, sleeping until any current holder of the
 * busy flag releases it.  The chain lock must be held on entry; it is
 * dropped and reacquired by msleep() while waiting, and is still held
 * on return.
 */
static inline void
umtxq_busy(struct umtx_key *key)
{
	int chain = umtxq_hash(key);

	mtx_assert(umtxq_mtx(chain), MA_OWNED);
	while (umtxq_chains[chain].uc_flags & UCF_BUSY) {
		/* Tell the holder someone is waiting before sleeping. */
		umtxq_chains[chain].uc_flags |= UCF_WANT;
		msleep(&umtxq_chains[chain], umtxq_mtx(chain),
		    0, "umtxq_busy", 0);
	}
	umtxq_chains[chain].uc_flags |= UCF_BUSY;
}
191
192 static inline void
193 umtxq_unbusy(struct umtx_key *key)
194 {
195 int chain = umtxq_hash(key);
196
197 mtx_assert(umtxq_mtx(chain), MA_OWNED);
198 KASSERT(umtxq_chains[chain].uc_flags & UCF_BUSY, ("not busy"));
199 umtxq_chains[chain].uc_flags &= ~UCF_BUSY;
200 if (umtxq_chains[chain].uc_flags & UCF_WANT) {
201 umtxq_chains[chain].uc_flags &= ~UCF_WANT;
202 wakeup(&umtxq_chains[chain]);
203 }
204 }
205
/*
 * Acquire the mutex of the chain this key hashes to.
 */
static inline void
umtxq_lock(struct umtx_key *key)
{
	mtx_lock(umtxq_mtx(umtxq_hash(key)));
}
212
/*
 * Release the mutex of the chain this key hashes to.
 */
static inline void
umtxq_unlock(struct umtx_key *key)
{
	mtx_unlock(umtxq_mtx(umtxq_hash(key)));
}
219
/*
 * Insert a thread onto the umtx queue.  The chain lock must be held.
 */
static inline void
umtxq_insert(struct umtx_q *uq)
{
	struct umtx_head *head;
	int chain = umtxq_hash(&uq->uq_key);

	mtx_assert(umtxq_mtx(chain), MA_OWNED);
	head = &umtxq_chains[chain].uc_queue;
	LIST_INSERT_HEAD(head, uq, uq_next);
	/*
	 * TDF_UMTXQ is protected by sched_lock; it lets the sleeper and
	 * umtxq_remove() see whether the thread is still on a queue.
	 */
	mtx_lock_spin(&sched_lock);
	uq->uq_thread->td_flags |= TDF_UMTXQ;
	mtx_unlock_spin(&sched_lock);
}
236
/*
 * Remove thread from the umtx queue, if it is still linked there.
 * TDF_UMTXQ being clear means a wakeup already dequeued the thread,
 * so this is safe to call redundantly.  The chain lock must be held.
 */
static inline void
umtxq_remove(struct umtx_q *uq)
{
	mtx_assert(umtxq_mtx(umtxq_hash(&uq->uq_key)), MA_OWNED);
	if (uq->uq_thread->td_flags & TDF_UMTXQ) {
		LIST_REMOVE(uq, uq_next);
		/* turning off TDF_UMTXQ should be the last thing. */
		mtx_lock_spin(&sched_lock);
		uq->uq_thread->td_flags &= ~TDF_UMTXQ;
		mtx_unlock_spin(&sched_lock);
	}
}
252
253 static int
254 umtxq_count(struct umtx_key *key)
255 {
256 struct umtx_q *uq;
257 struct umtx_head *head;
258 int chain, count = 0;
259
260 chain = umtxq_hash(key);
261 mtx_assert(umtxq_mtx(chain), MA_OWNED);
262 head = &umtxq_chains[chain].uc_queue;
263 LIST_FOREACH(uq, head, uq_next) {
264 if (umtx_key_match(&uq->uq_key, key)) {
265 if (++count > 1)
266 break;
267 }
268 }
269 return (count);
270 }
271
272 static int
273 umtxq_signal(struct umtx_key *key, int n_wake)
274 {
275 struct umtx_q *uq, *next;
276 struct umtx_head *head;
277 struct thread *blocked = NULL;
278 int chain, ret;
279
280 ret = 0;
281 chain = umtxq_hash(key);
282 mtx_assert(umtxq_mtx(chain), MA_OWNED);
283 head = &umtxq_chains[chain].uc_queue;
284 for (uq = LIST_FIRST(head); uq; uq = next) {
285 next = LIST_NEXT(uq, uq_next);
286 if (umtx_key_match(&uq->uq_key, key)) {
287 blocked = uq->uq_thread;
288 umtxq_remove(uq);
289 wakeup(blocked);
290 if (++ret >= n_wake)
291 break;
292 }
293 }
294 return (ret);
295 }
296
297 static inline int
298 umtxq_sleep(struct thread *td, struct umtx_key *key, int priority,
299 const char *wmesg, int timo)
300 {
301 int chain = umtxq_hash(key);
302 int error = msleep(td, umtxq_mtx(chain), priority, wmesg, timo);
303 if (error == EWOULDBLOCK)
304 error = ETIMEDOUT;
305 return (error);
306 }
307
/*
 * Build the umtx_key for the umtx at user address "umtx" in td's
 * address space.  Under UMTX_STATIC_SHARED (the compiled-in default
 * above), inheritance-shared mappings get an (object, offset) key with
 * a reference taken on the object, while other mappings get an
 * (address, pid) key.  Returns 0, or EFAULT if the address is not
 * mapped writable.  A successful key must eventually be passed to
 * umtx_key_release().
 */
static int
umtx_key_get(struct thread *td, void *umtx, struct umtx_key *key)
{
#if defined(UMTX_DYNAMIC_SHARED) || defined(UMTX_STATIC_SHARED)
	vm_map_t map;
	vm_map_entry_t entry;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;

	map = &td->td_proc->p_vmspace->vm_map;
	if (vm_map_lookup(&map, (vm_offset_t)umtx, VM_PROT_WRITE,
	    &entry, &key->info.shared.object, &pindex, &prot,
	    &wired) != KERN_SUCCESS) {
		return EFAULT;
	}
#endif

#if defined(UMTX_DYNAMIC_SHARED)
	key->type = UMTX_SHARED;
	/*
	 * NOTE(review): the offset sign looks inverted relative to the
	 * usual "addr - start + offset"; since the same formula is used
	 * everywhere keys are built (see fork_handler()), matching still
	 * works -- confirm before changing.
	 */
	key->info.shared.offset = entry->offset + entry->start -
		(vm_offset_t)umtx;
	/*
	 * Add object reference, if we don't do this, a buggy application
	 * deallocates the object, the object will be reused by other
	 * applications, then unlock will wake wrong thread.
	 */
	vm_object_reference(key->info.shared.object);
	vm_map_lookup_done(map, entry);
#elif defined(UMTX_STATIC_SHARED)
	if (VM_INHERIT_SHARE == entry->inheritance) {
		key->type = UMTX_SHARED;
		key->info.shared.offset = entry->offset + entry->start -
			(vm_offset_t)umtx;
		vm_object_reference(key->info.shared.object);
	} else {
		key->type = UMTX_PRIVATE;
		key->info.private.umtx = umtx;
		key->info.private.pid = td->td_proc->p_pid;
	}
	vm_map_lookup_done(map, entry);
#else
	key->type = UMTX_PRIVATE;
	key->info.private.umtx = umtx;
	key->info.private.pid = td->td_proc->p_pid;
#endif
	return (0);
}
356
357 static inline void
358 umtx_key_release(struct umtx_key *key)
359 {
360 if (key->type == UMTX_SHARED)
361 vm_object_deallocate(key->info.shared.object);
362 }
363
/*
 * Resolve the key for "umtx" and link the calling thread onto the
 * matching wait queue.  On success (returns 0) the thread has
 * TDF_UMTXQ set and holds a key reference that the caller must
 * release; the chain lock is not held on return.
 */
static inline int
umtxq_queue_me(struct thread *td, void *umtx, struct umtx_q *uq)
{
	int error;

	if ((error = umtx_key_get(td, umtx, &uq->uq_key)) != 0)
		return (error);

	uq->uq_addr = (vm_offset_t)umtx;
	uq->uq_thread = td;
	umtxq_lock(&uq->uq_key);
	/* hmm, for condition variable, we don't need busy flag. */
	umtxq_busy(&uq->uq_key);
	umtxq_insert(uq);
	umtxq_unbusy(&uq->uq_key);
	umtxq_unlock(&uq->uq_key);
	return (0);
}
382
#if defined(UMTX_DYNAMIC_SHARED)
/*
 * process_fork event handler (only compiled under UMTX_DYNAMIC_SHARED).
 * A fork can change the parent's mappings, so the (object, offset)
 * keys of threads already asleep on shared umtxes may no longer match
 * a fresh lookup.  Re-derive each sleeping thread's key and requeue it
 * under the new key if it changed, so later wakeups can find it.
 */
static void
fork_handler(void *arg, struct proc *p1, struct proc *p2, int flags)
{
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	struct umtx_key key;
	LIST_HEAD(, umtx_q) workq;
	struct umtx_q *uq;
	struct thread *td;
	int onq;

	LIST_INIT(&workq);

	/* Collect threads waiting on umtxq */
	PROC_LOCK(p1);
	FOREACH_THREAD_IN_PROC(p1, td) {
		if (td->td_flags & TDF_UMTXQ) {
			uq = td->td_umtxq;
			if (uq)
				LIST_INSERT_HEAD(&workq, uq, uq_rqnext);
		}
	}
	PROC_UNLOCK(p1);

	LIST_FOREACH(uq, &workq, uq_rqnext) {
		/* Recompute the key the same way umtx_key_get() does. */
		map = &p1->p_vmspace->vm_map;
		if (vm_map_lookup(&map, uq->uq_addr, VM_PROT_WRITE,
		    &entry, &object, &pindex, &prot, &wired) != KERN_SUCCESS) {
			continue;
		}
		key.type = UMTX_SHARED;
		key.info.shared.object = object;
		key.info.shared.offset = entry->offset + entry->start -
			uq->uq_addr;
		if (umtx_key_match(&key, &uq->uq_key)) {
			/* Key unchanged; nothing to requeue. */
			vm_map_lookup_done(map, entry);
			continue;
		}

		umtxq_lock(&uq->uq_key);
		umtxq_busy(&uq->uq_key);
		if (uq->uq_thread->td_flags & TDF_UMTXQ) {
			umtxq_remove(uq);
			onq = 1;
		} else
			onq = 0;
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);
		if (onq) {
			/* Swap the key and move the waiter to its new chain. */
			vm_object_deallocate(uq->uq_key.info.shared.object);
			uq->uq_key = key;
			umtxq_lock(&uq->uq_key);
			umtxq_busy(&uq->uq_key);
			umtxq_insert(uq);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			vm_object_reference(uq->uq_key.info.shared.object);
		}
		vm_map_lookup_done(map, entry);
	}
}
#endif
450
/*
 * Acquire the umtx at user address "umtx" for thread id "id",
 * sleeping for at most "timo" ticks per attempt (0 = no timeout).
 * Implements the contested-bit handoff protocol with userland: the
 * uncontested acquire is a single CAS, and only contested locks reach
 * the queue-and-sleep path.  Returns 0 on success, EFAULT if the
 * address faults, or an error (ETIMEDOUT/ERESTART) from the sleep.
 */
static int
_do_lock(struct thread *td, struct umtx *umtx, long id, int timo)
{
	struct umtx_q *uq;
	intptr_t owner;
	intptr_t old;
	int error = 0;

	uq = td->td_umtxq;
	/*
	 * Care must be exercised when dealing with umtx structure. It
	 * can fault on any access.
	 */

	for (;;) {
		/*
		 * Try the uncontested case. This should be done in userland.
		 */
		owner = casuptr((intptr_t *)&umtx->u_owner,
		    UMTX_UNOWNED, id);

		/* The acquire succeeded. */
		if (owner == UMTX_UNOWNED)
			return (0);

		/* The address was invalid. */
		if (owner == -1)
			return (EFAULT);

		/* If no one owns it but it is contested try to acquire it. */
		if (owner == UMTX_CONTESTED) {
			owner = casuptr((intptr_t *)&umtx->u_owner,
			    UMTX_CONTESTED, id | UMTX_CONTESTED);

			if (owner == UMTX_CONTESTED)
				return (0);

			/* The address was invalid. */
			if (owner == -1)
				return (EFAULT);

			/* If this failed the lock has changed, restart. */
			continue;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error || (error = umtxq_queue_me(td, umtx, uq)) != 0)
			return (error);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock. If this fails
		 * either some one else has acquired the lock or it has been
		 * released.
		 */
		old = casuptr((intptr_t *)&umtx->u_owner, owner,
		    owner | UMTX_CONTESTED);

		/* The address was invalid. */
		if (old == -1) {
			/* Undo the queueing done above before failing. */
			umtxq_lock(&uq->uq_key);
			umtxq_busy(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}

		/*
		 * We set the contested bit, sleep. Otherwise the lock changed
		 * and we need to retry or we lost a race to the thread
		 * unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		if (old == owner && (td->td_flags & TDF_UMTXQ)) {
			error = umtxq_sleep(td, &uq->uq_key, PCATCH,
			    "umtx", timo);
		}
		/* Dequeue ourselves; a wakeup may already have done it. */
		umtxq_busy(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);
	}

	/* NOTREACHED: the loop above only exits via return. */
	return (0);
}
542
/*
 * Lock with an optional relative timeout.  Without a timeout the
 * request simply blocks inside _do_lock(); with one, each sleep is
 * bounded and the remaining time is recomputed against the uptime
 * clock until the deadline passes.  ERESTART is mapped to EINTR so
 * userland can back off its critical region and retry.
 */
static int
do_lock(struct thread *td, struct umtx *umtx, long id,
	struct timespec *timeout)
{
	struct timespec ts, ts2, ts3;
	struct timeval tv;
	int error;

	if (timeout == NULL) {
		error = _do_lock(td, umtx, id, 0);
	} else {
		/* Compute the absolute deadline on the uptime clock. */
		getnanouptime(&ts);
		timespecadd(&ts, timeout);
		TIMESPEC_TO_TIMEVAL(&tv, timeout);
		for (;;) {
			error = _do_lock(td, umtx, id, tvtohz(&tv));
			if (error != ETIMEDOUT)
				break;
			getnanouptime(&ts2);
			if (timespeccmp(&ts2, &ts, >=)) {
				error = ETIMEDOUT;
				break;
			}
			/* Retry with whatever time remains. */
			ts3 = ts;
			timespecsub(&ts3, &ts2);
			TIMESPEC_TO_TIMEVAL(&tv, &ts3);
		}
	}
	/*
	 * This lets userland back off critical region if needed.
	 */
	if (error == ERESTART)
		error = EINTR;
	return (error);
}
578
/*
 * Release the umtx at user address "umtx" held by thread id "id".
 * Only contested locks should reach the kernel: the owner word is
 * rewritten to UMTX_UNOWNED (or kept UMTX_CONTESTED when more than
 * one waiter remains) and one waiter is awakened.  Returns 0, EFAULT,
 * EPERM if the caller is not the owner, or EINVAL on a protocol
 * violation (uncontested lock, or the owner word changed under us).
 */
static int
do_unlock(struct thread *td, struct umtx *umtx, long id)
{
	struct umtx_key key;
	intptr_t owner;
	intptr_t old;
	int error;
	int count;

	/*
	 * Make sure we own this mtx.
	 *
	 * XXX Need a {fu,su}ptr this is not correct on arch where
	 * sizeof(intptr_t) != sizeof(long).
	 */
	if ((owner = fuword(&umtx->u_owner)) == -1)
		return (EFAULT);

	if ((owner & ~UMTX_CONTESTED) != id)
		return (EPERM);

	/* We should only ever be in here for contested locks */
	if ((owner & UMTX_CONTESTED) == 0)
		return (EINVAL);

	if ((error = umtx_key_get(td, umtx, &key)) != 0)
		return (error);

	/* Busy the chain so no new waiter can slip in during the handoff. */
	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * there is zero or one thread only waiting for it.
	 * Otherwise, it must be marked as contested.
	 */
	old = casuptr((intptr_t *)&umtx->u_owner, owner,
	    count <= 1 ? UMTX_UNOWNED : UMTX_CONTESTED);
	umtxq_lock(&key);
	/* nr_wakeup 0 still wakes one waiter; see umtxq_signal(). */
	umtxq_signal(&key, 0);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	if (old == -1)
		return (EFAULT);
	if (old != owner)
		return (EINVAL);
	return (0);
}
630
631 #ifdef COMPAT_IA32
/*
 * 32-bit compat version of _do_lock(): acquire the 32-bit umtx word
 * at user address "m" for thread id "id", sleeping up to "timo" ticks
 * per attempt (0 = no timeout).  Same contested-bit protocol as the
 * native path, using 32-bit CAS and UMTX_CONTESTED32.
 */
static int
_do_lock32(struct thread *td, uint32_t *m, uint32_t id, int timo)
{
	struct umtx_q *uq;
	int32_t owner;
	int32_t old;
	int error = 0;

	uq = td->td_umtxq;
	/*
	 * Care must be exercised when dealing with umtx structure. It
	 * can fault on any access.
	 */

	for (;;) {
		/*
		 * Try the uncontested case. This should be done in userland.
		 */
		owner = casuword32(m, UMTX_UNOWNED, id);

		/* The acquire succeeded. */
		if (owner == UMTX_UNOWNED)
			return (0);

		/* The address was invalid. */
		if (owner == -1)
			return (EFAULT);

		/* If no one owns it but it is contested try to acquire it. */
		if (owner == UMTX_CONTESTED32) {
			owner = casuword32(m,
			    UMTX_CONTESTED32, id | UMTX_CONTESTED32);

			if (owner == UMTX_CONTESTED32)
				return (0);

			/* The address was invalid. */
			if (owner == -1)
				return (EFAULT);

			/* If this failed the lock has changed, restart. */
			continue;
		}

		/*
		 * If we caught a signal, we have retried and now
		 * exit immediately.
		 */
		if (error || (error = umtxq_queue_me(td, m, uq)) != 0)
			return (error);

		/*
		 * Set the contested bit so that a release in user space
		 * knows to use the system call for unlock. If this fails
		 * either some one else has acquired the lock or it has been
		 * released.
		 */
		old = casuword32(m, owner, owner | UMTX_CONTESTED32);

		/* The address was invalid. */
		if (old == -1) {
			/* Undo the queueing done above before failing. */
			umtxq_lock(&uq->uq_key);
			umtxq_busy(&uq->uq_key);
			umtxq_remove(uq);
			umtxq_unbusy(&uq->uq_key);
			umtxq_unlock(&uq->uq_key);
			umtx_key_release(&uq->uq_key);
			return (EFAULT);
		}

		/*
		 * We set the contested bit, sleep. Otherwise the lock changed
		 * and we need to retry or we lost a race to the thread
		 * unlocking the umtx.
		 */
		umtxq_lock(&uq->uq_key);
		if (old == owner && (td->td_flags & TDF_UMTXQ)) {
			error = umtxq_sleep(td, &uq->uq_key, PCATCH,
			    "umtx", timo);
		}
		/* Dequeue ourselves; a wakeup may already have done it. */
		umtxq_busy(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unbusy(&uq->uq_key);
		umtxq_unlock(&uq->uq_key);
		umtx_key_release(&uq->uq_key);
	}

	/* NOTREACHED: the loop above only exits via return. */
	return (0);
}
721
/*
 * 32-bit compat counterpart of do_lock(): lock with an optional
 * relative timeout, recomputing the remaining time against the uptime
 * clock after each ETIMEDOUT attempt.  ERESTART becomes EINTR so
 * userland can back off and retry.
 */
static int
do_lock32(struct thread *td, void *m, uint32_t id,
	struct timespec *timeout)
{
	struct timespec ts, ts2, ts3;
	struct timeval tv;
	int error;

	if (timeout == NULL) {
		error = _do_lock32(td, m, id, 0);
	} else {
		/* Compute the absolute deadline on the uptime clock. */
		getnanouptime(&ts);
		timespecadd(&ts, timeout);
		TIMESPEC_TO_TIMEVAL(&tv, timeout);
		for (;;) {
			error = _do_lock32(td, m, id, tvtohz(&tv));
			if (error != ETIMEDOUT)
				break;
			getnanouptime(&ts2);
			if (timespeccmp(&ts2, &ts, >=)) {
				error = ETIMEDOUT;
				break;
			}
			/* Retry with whatever time remains. */
			ts3 = ts;
			timespecsub(&ts3, &ts2);
			TIMESPEC_TO_TIMEVAL(&tv, &ts3);
		}
	}
	/*
	 * This lets userland back off critical region if needed.
	 */
	if (error == ERESTART)
		error = EINTR;
	return (error);
}
757
/*
 * 32-bit compat counterpart of do_unlock(): release the 32-bit umtx
 * word at "m" held by thread id "id", rewriting it to UMTX_UNOWNED or
 * UMTX_CONTESTED32 depending on the remaining waiter count, and wake
 * one waiter.  Returns 0, EFAULT, EPERM or EINVAL as do_unlock() does.
 */
static int
do_unlock32(struct thread *td, uint32_t *m, uint32_t id)
{
	struct umtx_key key;
	int32_t owner;
	int32_t old;
	int error;
	int count;

	/*
	 * Make sure we own this mtx.
	 *
	 * XXX Need a {fu,su}ptr this is not correct on arch where
	 * sizeof(intptr_t) != sizeof(long).
	 */
	if ((owner = fuword32(m)) == -1)
		return (EFAULT);

	if ((owner & ~UMTX_CONTESTED32) != id)
		return (EPERM);

	/* We should only ever be in here for contested locks */
	if ((owner & UMTX_CONTESTED32) == 0)
		return (EINVAL);

	if ((error = umtx_key_get(td, m, &key)) != 0)
		return (error);

	/* Busy the chain so no new waiter can slip in during the handoff. */
	umtxq_lock(&key);
	umtxq_busy(&key);
	count = umtxq_count(&key);
	umtxq_unlock(&key);

	/*
	 * When unlocking the umtx, it must be marked as unowned if
	 * there is zero or one thread only waiting for it.
	 * Otherwise, it must be marked as contested.
	 */
	old = casuword32(m, owner,
	    count <= 1 ? UMTX_UNOWNED : UMTX_CONTESTED32);
	umtxq_lock(&key);
	/* nr_wakeup 0 still wakes one waiter; see umtxq_signal(). */
	umtxq_signal(&key, 0);
	umtxq_unbusy(&key);
	umtxq_unlock(&key);
	umtx_key_release(&key);
	if (old == -1)
		return (EFAULT);
	if (old != owner)
		return (EINVAL);
	return (0);
}
809 #endif
810
/*
 * Wait until the owner word of the umtx at "umtx" no longer equals
 * "id", a wakeup arrives, or the optional timeout expires.
 * "compat32" selects a 32-bit read of the owner word.  Returns 0 on
 * wakeup or value mismatch, ETIMEDOUT, or EINTR after a signal.
 */
static int
do_wait(struct thread *td, struct umtx *umtx, long id, struct timespec *timeout,
	int compat32)
{
	struct umtx_q *uq;
	struct timespec ts, ts2, ts3;
	struct timeval tv;
	long tmp;
	int error = 0;

	uq = td->td_umtxq;
	if ((error = umtxq_queue_me(td, umtx, uq)) != 0)
		return (error);
	/*
	 * Re-read the value after queueing: if it has already changed
	 * there is nothing to wait for (futex-style lost-wakeup
	 * avoidance).
	 */
	if (compat32 == 0)
		tmp = fuword(&umtx->u_owner);
	else
		tmp = fuword32(&umtx->u_owner);
	if (tmp != id) {
		umtxq_lock(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
	} else if (timeout == NULL) {
		umtxq_lock(&uq->uq_key);
		if (td->td_flags & TDF_UMTXQ)
			error = umtxq_sleep(td, &uq->uq_key,
			    PCATCH, "ucond", 0);
		/* A cleared TDF_UMTXQ means we were properly awakened. */
		if (!(td->td_flags & TDF_UMTXQ))
			error = 0;
		else
			umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
	} else {
		/* Bounded wait: recompute the remaining time each pass. */
		getnanouptime(&ts);
		timespecadd(&ts, timeout);
		TIMESPEC_TO_TIMEVAL(&tv, timeout);
		for (;;) {
			umtxq_lock(&uq->uq_key);
			if (td->td_flags & TDF_UMTXQ) {
				error = umtxq_sleep(td, &uq->uq_key, PCATCH,
				    "ucond", tvtohz(&tv));
			}
			if (!(td->td_flags & TDF_UMTXQ)) {
				/* Woken by umtxq_signal(); success. */
				umtxq_unlock(&uq->uq_key);
				goto out;
			}
			umtxq_unlock(&uq->uq_key);
			if (error != ETIMEDOUT)
				break;
			getnanouptime(&ts2);
			if (timespeccmp(&ts2, &ts, >=)) {
				error = ETIMEDOUT;
				break;
			}
			ts3 = ts;
			timespecsub(&ts3, &ts2);
			TIMESPEC_TO_TIMEVAL(&tv, &ts3);
		}
		umtxq_lock(&uq->uq_key);
		umtxq_remove(uq);
		umtxq_unlock(&uq->uq_key);
	}
out:
	umtx_key_release(&uq->uq_key);
	if (error == ERESTART)
		error = EINTR;
	return (error);
}
878
879 int
880 kern_umtx_wake(struct thread *td, void *uaddr, int n_wake)
881 {
882 struct umtx_key key;
883 int ret;
884
885 if ((ret = umtx_key_get(td, uaddr, &key)) != 0)
886 return (ret);
887 umtxq_lock(&key);
888 ret = umtxq_signal(&key, n_wake);
889 umtxq_unlock(&key);
890 umtx_key_release(&key);
891 return (0);
892 }
893
/*
 * _umtx_lock(2) entry point: lock uap->umtx with the caller's thread
 * id, waiting without a timeout.
 */
int
_umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
    /* struct umtx *umtx */
{
	return _do_lock(td, uap->umtx, td->td_tid, 0);
}
900
/*
 * _umtx_unlock(2) entry point: unlock uap->umtx held by the caller's
 * thread id.
 */
int
_umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
    /* struct umtx *umtx */
{
	return do_unlock(td, uap->umtx, td->td_tid);
}
907
908 int
909 _umtx_op(struct thread *td, struct _umtx_op_args *uap)
910 {
911 struct timespec timeout;
912 struct timespec *ts;
913 int error;
914
915 switch(uap->op) {
916 case UMTX_OP_LOCK:
917 /* Allow a null timespec (wait forever). */
918 if (uap->uaddr2 == NULL)
919 ts = NULL;
920 else {
921 error = copyin(uap->uaddr2, &timeout, sizeof(timeout));
922 if (error != 0)
923 break;
924 if (timeout.tv_nsec >= 1000000000 ||
925 timeout.tv_nsec < 0) {
926 error = EINVAL;
927 break;
928 }
929 ts = &timeout;
930 }
931 error = do_lock(td, uap->umtx, uap->id, ts);
932 break;
933 case UMTX_OP_UNLOCK:
934 error = do_unlock(td, uap->umtx, uap->id);
935 break;
936 case UMTX_OP_WAIT:
937 /* Allow a null timespec (wait forever). */
938 if (uap->uaddr2 == NULL)
939 ts = NULL;
940 else {
941 error = copyin(uap->uaddr2, &timeout, sizeof(timeout));
942 if (error != 0)
943 break;
944 if (timeout.tv_nsec >= 1000000000 ||
945 timeout.tv_nsec < 0) {
946 error = EINVAL;
947 break;
948 }
949 ts = &timeout;
950 }
951 error = do_wait(td, uap->umtx, uap->id, ts, 0);
952 break;
953 case UMTX_OP_WAKE:
954 error = kern_umtx_wake(td, uap->umtx, uap->id);
955 break;
956 default:
957 error = EINVAL;
958 break;
959 }
960 return (error);
961 }
962
963 #ifdef COMPAT_IA32
/*
 * 32-bit compat _umtx_lock(2): lock the 32-bit umtx word with the
 * caller's thread id, no timeout.
 */
int
freebsd32_umtx_lock(struct thread *td, struct freebsd32_umtx_lock_args *uap)
    /* struct umtx *umtx */
{
	return (do_lock32(td, (uint32_t *)uap->umtx, td->td_tid, NULL));
}
970
/*
 * 32-bit compat _umtx_unlock(2): unlock the 32-bit umtx word held by
 * the caller's thread id.
 */
int
freebsd32_umtx_unlock(struct thread *td, struct freebsd32_umtx_unlock_args *uap)
    /* struct umtx *umtx */
{
	return (do_unlock32(td, (uint32_t *)uap->umtx, td->td_tid));
}
977
/*
 * 32-bit userland layout of struct timespec.
 * NOTE(review): both fields are declared unsigned, so a negative
 * tv_sec/tv_nsec from 32-bit userland cannot arrive as negative and
 * the tv_nsec < 0 checks in the compat handlers can never fire --
 * confirm this matches the 32-bit userland definition.
 */
struct timespec32 {
	u_int32_t tv_sec;
	u_int32_t tv_nsec;
};
982
983 static inline int
984 copyin_timeout32(void *addr, struct timespec *tsp)
985 {
986 struct timespec32 ts32;
987 int error;
988
989 error = copyin(addr, &ts32, sizeof(struct timespec32));
990 if (error == 0) {
991 tsp->tv_sec = ts32.tv_sec;
992 tsp->tv_nsec = ts32.tv_nsec;
993 }
994 return (error);
995 }
996
997 static int
998 __umtx_op_lock_umtx_compat32(struct thread *td, struct _umtx_op_args *uap)
999 {
1000 struct timespec *ts, timeout;
1001 int error;
1002
1003 /* Allow a null timespec (wait forever). */
1004 if (uap->uaddr2 == NULL)
1005 ts = NULL;
1006 else {
1007 error = copyin_timeout32(uap->uaddr2, &timeout);
1008 if (error != 0)
1009 return (error);
1010 if (timeout.tv_nsec >= 1000000000 ||
1011 timeout.tv_nsec < 0) {
1012 return (EINVAL);
1013 }
1014 ts = &timeout;
1015 }
1016 return (do_lock32(td, uap->umtx, uap->id, ts));
1017 }
1018
/*
 * 32-bit compat handler for UMTX_OP_UNLOCK.
 */
static int
__umtx_op_unlock_umtx_compat32(struct thread *td, struct _umtx_op_args *uap)
{
	return (do_unlock32(td, (uint32_t *)uap->umtx, (uint32_t)uap->id));
}
1024
1025 static int
1026 __umtx_op_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
1027 {
1028 struct timespec *ts, timeout;
1029 int error;
1030
1031 if (uap->uaddr2 == NULL)
1032 ts = NULL;
1033 else {
1034 error = copyin_timeout32(uap->uaddr2, &timeout);
1035 if (error != 0)
1036 return (error);
1037 if (timeout.tv_nsec >= 1000000000 ||
1038 timeout.tv_nsec < 0)
1039 return (EINVAL);
1040 ts = &timeout;
1041 }
1042 return do_wait(td, uap->umtx, uap->id, ts, 1);
1043 }
1044
/*
 * 32-bit compat _umtx_op(2) entry point: dispatch to the compat32
 * handlers, which widen timespec32 arguments as needed.
 */
int
freebsd32_umtx_op(struct thread *td, struct freebsd32_umtx_op_args *uap)
{

	switch ((unsigned)uap->op) {
	case UMTX_OP_LOCK:
		return __umtx_op_lock_umtx_compat32(td,
		    (struct _umtx_op_args *)uap);
	case UMTX_OP_UNLOCK:
		return __umtx_op_unlock_umtx_compat32(td,
		    (struct _umtx_op_args *)uap);
	case UMTX_OP_WAIT:
		return __umtx_op_wait_compat32(td,
		    (struct _umtx_op_args *)uap);
	case UMTX_OP_WAKE:
		return kern_umtx_wake(td, (uint32_t *)uap->umtx, uap->id);
	default:
		return (EINVAL);
	}
}
1065 #endif
Cache object: 87ef8057e9ab422091633eba7c453013
|