FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_umtx.c
1 /*-
2 * Copyright (c) 2004, David Xu <davidxu@freebsd.org>
3 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice unmodified, this list of conditions, and the following
11 * disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD: releng/10.2/sys/kern/kern_umtx.c 280309 2015-03-21 01:39:44Z kib $");
30
31 #include "opt_compat.h"
32 #include "opt_umtx_profiling.h"
33
34 #include <sys/param.h>
35 #include <sys/kernel.h>
36 #include <sys/limits.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
40 #include <sys/priv.h>
41 #include <sys/proc.h>
42 #include <sys/sbuf.h>
43 #include <sys/sched.h>
44 #include <sys/smp.h>
45 #include <sys/sysctl.h>
46 #include <sys/sysent.h>
47 #include <sys/systm.h>
48 #include <sys/sysproto.h>
49 #include <sys/syscallsubr.h>
50 #include <sys/eventhandler.h>
51 #include <sys/umtx.h>
52
53 #include <vm/vm.h>
54 #include <vm/vm_param.h>
55 #include <vm/pmap.h>
56 #include <vm/vm_map.h>
57 #include <vm/vm_object.h>
58
59 #include <machine/cpu.h>
60
61 #ifdef COMPAT_FREEBSD32
62 #include <compat/freebsd32/freebsd32_proto.h>
63 #endif
64
65 #define _UMUTEX_TRY 1
66 #define _UMUTEX_WAIT 2
67
68 #ifdef UMTX_PROFILING
69 #define UPROF_PERC_BIGGER(w, f, sw, sf) \
70 (((w) > (sw)) || ((w) == (sw) && (f) > (sf)))
71 #endif
72
73 /* Priority inheritance mutex info. */
74 struct umtx_pi {
75 /* Owner thread */
76 struct thread *pi_owner;
77
78 /* Reference count */
79 int pi_refcount;
80
    81  	/* List entry to link PI mutexes held by a thread */
82 TAILQ_ENTRY(umtx_pi) pi_link;
83
84 /* List entry in hash */
85 TAILQ_ENTRY(umtx_pi) pi_hashlink;
86
87 /* List for waiters */
88 TAILQ_HEAD(,umtx_q) pi_blocked;
89
90 /* Identify a userland lock object */
91 struct umtx_key pi_key;
92 };
93
    94  /* A waiter on a userland synchronization object. */
95 struct umtx_q {
96 /* Linked list for the hash. */
97 TAILQ_ENTRY(umtx_q) uq_link;
98
99 /* Umtx key. */
100 struct umtx_key uq_key;
101
102 /* Umtx flags. */
103 int uq_flags;
104 #define UQF_UMTXQ 0x0001
105
   106  	/* The waiting thread. */
107 struct thread *uq_thread;
108
   109  	/*
   110  	 * The PI mutex this thread is blocked on.  Reads may hold either
   111  	 * the chain lock or umtx_lock; writes must hold both the chain
   112  	 * lock and umtx_lock.
   113  	 */
114 struct umtx_pi *uq_pi_blocked;
115
116 /* On blocked list */
117 TAILQ_ENTRY(umtx_q) uq_lockq;
118
   119  	/* PI mutexes owned by us that other threads contend for */
120 TAILQ_HEAD(,umtx_pi) uq_pi_contested;
121
122 /* Inherited priority from PP mutex */
123 u_char uq_inherited_pri;
124
125 /* Spare queue ready to be reused */
126 struct umtxq_queue *uq_spare_queue;
127
   128  	/* The queue we are on */
129 struct umtxq_queue *uq_cur_queue;
130 };
131
132 TAILQ_HEAD(umtxq_head, umtx_q);
133
134 /* Per-key wait-queue */
135 struct umtxq_queue {
136 struct umtxq_head head;
137 struct umtx_key key;
138 LIST_ENTRY(umtxq_queue) link;
139 int length;
140 };
141
142 LIST_HEAD(umtxq_list, umtxq_queue);
143
144 /* Userland lock object's wait-queue chain */
145 struct umtxq_chain {
146 /* Lock for this chain. */
147 struct mtx uc_lock;
148
149 /* List of sleep queues. */
150 struct umtxq_list uc_queue[2];
151 #define UMTX_SHARED_QUEUE 0
152 #define UMTX_EXCLUSIVE_QUEUE 1
153
154 LIST_HEAD(, umtxq_queue) uc_spare_queue;
155
156 /* Busy flag */
157 char uc_busy;
158
159 /* Chain lock waiters */
160 int uc_waiters;
161
   162  	/* All PI mutexes hashed to this chain */
163 TAILQ_HEAD(,umtx_pi) uc_pi_list;
164
165 #ifdef UMTX_PROFILING
166 u_int length;
167 u_int max_length;
168 #endif
169 };
170
171 #define UMTXQ_LOCKED_ASSERT(uc) mtx_assert(&(uc)->uc_lock, MA_OWNED)
172
   173  /*
   174   * Don't propagate time-sharing priority; there is a security reason.
   175   * A user could simply create a PI mutex, let thread A lock it, and
   176   * let another thread B block on it.  Because B is sleeping, its
   177   * priority would be boosted, and priority propagation would boost
   178   * A's priority as well; it would then never be lowered even if A is
   179   * using 100% CPU, which is unfair to other processes.
   180   */
181
182 #define UPRI(td) (((td)->td_user_pri >= PRI_MIN_TIMESHARE &&\
183 (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?\
184 PRI_MAX_TIMESHARE : (td)->td_user_pri)
185
186 #define GOLDEN_RATIO_PRIME 2654404609U
187 #define UMTX_CHAINS 512
188 #define UMTX_SHIFTS (__WORD_BIT - 9)
189
190 #define GET_SHARE(flags) \
191 (((flags) & USYNC_PROCESS_SHARED) == 0 ? THREAD_SHARE : PROCESS_SHARE)
192
193 #define BUSY_SPINS 200
194
195 struct abs_timeout {
196 int clockid;
197 struct timespec cur;
198 struct timespec end;
199 };
200
201 static uma_zone_t umtx_pi_zone;
202 static struct umtxq_chain umtxq_chains[2][UMTX_CHAINS];
203 static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
204 static int umtx_pi_allocated;
205
206 static SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW, 0, "umtx debug");
207 SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD,
208 &umtx_pi_allocated, 0, "Allocated umtx_pi");
209
210 #ifdef UMTX_PROFILING
211 static long max_length;
212 SYSCTL_LONG(_debug_umtx, OID_AUTO, max_length, CTLFLAG_RD, &max_length, 0, "max_length");
213 static SYSCTL_NODE(_debug_umtx, OID_AUTO, chains, CTLFLAG_RD, 0, "umtx chain stats");
214 #endif
215
216 static void umtxq_sysinit(void *);
217 static void umtxq_hash(struct umtx_key *key);
218 static struct umtxq_chain *umtxq_getchain(struct umtx_key *key);
219 static void umtxq_lock(struct umtx_key *key);
220 static void umtxq_unlock(struct umtx_key *key);
221 static void umtxq_busy(struct umtx_key *key);
222 static void umtxq_unbusy(struct umtx_key *key);
223 static void umtxq_insert_queue(struct umtx_q *uq, int q);
224 static void umtxq_remove_queue(struct umtx_q *uq, int q);
225 static int umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *);
226 static int umtxq_count(struct umtx_key *key);
227 static struct umtx_pi *umtx_pi_alloc(int);
228 static void umtx_pi_free(struct umtx_pi *pi);
229 static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags);
230 static void umtx_thread_cleanup(struct thread *td);
231 static void umtx_exec_hook(void *arg __unused, struct proc *p __unused,
232 struct image_params *imgp __unused);
233 SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL);
234
235 #define umtxq_signal(key, nwake) umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)
236 #define umtxq_insert(uq) umtxq_insert_queue((uq), UMTX_SHARED_QUEUE)
237 #define umtxq_remove(uq) umtxq_remove_queue((uq), UMTX_SHARED_QUEUE)
238
239 static struct mtx umtx_lock;
240
241 #ifdef UMTX_PROFILING
242 static void
243 umtx_init_profiling(void)
244 {
245 struct sysctl_oid *chain_oid;
246 char chain_name[10];
247 int i;
248
249 for (i = 0; i < UMTX_CHAINS; ++i) {
250 snprintf(chain_name, sizeof(chain_name), "%d", i);
251 chain_oid = SYSCTL_ADD_NODE(NULL,
252 SYSCTL_STATIC_CHILDREN(_debug_umtx_chains), OID_AUTO,
253 chain_name, CTLFLAG_RD, NULL, "umtx hash stats");
254 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
255 "max_length0", CTLFLAG_RD, &umtxq_chains[0][i].max_length, 0, NULL);
256 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
257 "max_length1", CTLFLAG_RD, &umtxq_chains[1][i].max_length, 0, NULL);
258 }
259 }
260
261 static int
262 sysctl_debug_umtx_chains_peaks(SYSCTL_HANDLER_ARGS)
263 {
264 char buf[512];
265 struct sbuf sb;
266 struct umtxq_chain *uc;
267 u_int fract, i, j, tot, whole;
268 u_int sf0, sf1, sf2, sf3, sf4;
269 u_int si0, si1, si2, si3, si4;
270 u_int sw0, sw1, sw2, sw3, sw4;
271
272 sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
273 for (i = 0; i < 2; i++) {
274 tot = 0;
275 for (j = 0; j < UMTX_CHAINS; ++j) {
276 uc = &umtxq_chains[i][j];
277 mtx_lock(&uc->uc_lock);
278 tot += uc->max_length;
279 mtx_unlock(&uc->uc_lock);
280 }
281 if (tot == 0)
282 sbuf_printf(&sb, "%u) Empty ", i);
283 else {
284 sf0 = sf1 = sf2 = sf3 = sf4 = 0;
285 si0 = si1 = si2 = si3 = si4 = 0;
286 sw0 = sw1 = sw2 = sw3 = sw4 = 0;
287 for (j = 0; j < UMTX_CHAINS; j++) {
288 uc = &umtxq_chains[i][j];
289 mtx_lock(&uc->uc_lock);
290 whole = uc->max_length * 100;
291 mtx_unlock(&uc->uc_lock);
292 fract = (whole % tot) * 100;
293 if (UPROF_PERC_BIGGER(whole, fract, sw0, sf0)) {
294 sf0 = fract;
295 si0 = j;
296 sw0 = whole;
297 } else if (UPROF_PERC_BIGGER(whole, fract, sw1,
298 sf1)) {
299 sf1 = fract;
300 si1 = j;
301 sw1 = whole;
302 } else if (UPROF_PERC_BIGGER(whole, fract, sw2,
303 sf2)) {
304 sf2 = fract;
305 si2 = j;
306 sw2 = whole;
307 } else if (UPROF_PERC_BIGGER(whole, fract, sw3,
308 sf3)) {
309 sf3 = fract;
310 si3 = j;
311 sw3 = whole;
312 } else if (UPROF_PERC_BIGGER(whole, fract, sw4,
313 sf4)) {
314 sf4 = fract;
315 si4 = j;
316 sw4 = whole;
317 }
318 }
319 sbuf_printf(&sb, "queue %u:\n", i);
320 sbuf_printf(&sb, "1st: %u.%u%% idx: %u\n", sw0 / tot,
321 sf0 / tot, si0);
322 sbuf_printf(&sb, "2nd: %u.%u%% idx: %u\n", sw1 / tot,
323 sf1 / tot, si1);
324 sbuf_printf(&sb, "3rd: %u.%u%% idx: %u\n", sw2 / tot,
325 sf2 / tot, si2);
326 sbuf_printf(&sb, "4th: %u.%u%% idx: %u\n", sw3 / tot,
327 sf3 / tot, si3);
328 sbuf_printf(&sb, "5th: %u.%u%% idx: %u\n", sw4 / tot,
329 sf4 / tot, si4);
330 }
331 }
332 sbuf_trim(&sb);
333 sbuf_finish(&sb);
334 sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
335 sbuf_delete(&sb);
336 return (0);
337 }
338
339 static int
340 sysctl_debug_umtx_chains_clear(SYSCTL_HANDLER_ARGS)
341 {
342 struct umtxq_chain *uc;
343 u_int i, j;
344 int clear, error;
345
346 clear = 0;
347 error = sysctl_handle_int(oidp, &clear, 0, req);
348 if (error != 0 || req->newptr == NULL)
349 return (error);
350
351 if (clear != 0) {
352 for (i = 0; i < 2; ++i) {
353 for (j = 0; j < UMTX_CHAINS; ++j) {
354 uc = &umtxq_chains[i][j];
355 mtx_lock(&uc->uc_lock);
356 uc->length = 0;
357 uc->max_length = 0;
358 mtx_unlock(&uc->uc_lock);
359 }
360 }
361 }
362 return (0);
363 }
364
365 SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, clear,
366 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
367 sysctl_debug_umtx_chains_clear, "I", "Clear umtx chains statistics");
368 SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, peaks,
369 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
370 sysctl_debug_umtx_chains_peaks, "A", "Highest peaks in chains max length");
371 #endif
372
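/*
 * Boot-time initialization: create the umtx_pi UMA zone, initialize both
 * arrays of wait-queue chains, and register the process_exec hook.
 */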
373 static void
374 umtxq_sysinit(void *arg __unused)
375 {
376 int i, j;
377
378 umtx_pi_zone = uma_zcreate("umtx pi", sizeof(struct umtx_pi),
379 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
380 for (i = 0; i < 2; ++i) {
381 for (j = 0; j < UMTX_CHAINS; ++j) {
382 mtx_init(&umtxq_chains[i][j].uc_lock, "umtxql", NULL,
383 MTX_DEF | MTX_DUPOK);
384 LIST_INIT(&umtxq_chains[i][j].uc_queue[0]);
385 LIST_INIT(&umtxq_chains[i][j].uc_queue[1]);
386 LIST_INIT(&umtxq_chains[i][j].uc_spare_queue);
387 TAILQ_INIT(&umtxq_chains[i][j].uc_pi_list);
388 umtxq_chains[i][j].uc_busy = 0;
389 umtxq_chains[i][j].uc_waiters = 0;
390 #ifdef UMTX_PROFILING
391 umtxq_chains[i][j].length = 0;
392 umtxq_chains[i][j].max_length = 0;
393 #endif
394 }
395 }
396 #ifdef UMTX_PROFILING
397 umtx_init_profiling();
398 #endif
399 mtx_init(&umtx_lock, "umtx lock", NULL, MTX_DEF);
400 EVENTHANDLER_REGISTER(process_exec, umtx_exec_hook, NULL,
401 EVENTHANDLER_PRI_ANY);
402 }
403
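/*
 * Allocate a umtx_q for a thread, together with the spare per-key wait
 * queue it may later donate to a chain.
 */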
404 struct umtx_q *
405 umtxq_alloc(void)
406 {
407 struct umtx_q *uq;
408
409 uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO);
410 uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX, M_WAITOK | M_ZERO);
411 TAILQ_INIT(&uq->uq_spare_queue->head);
412 TAILQ_INIT(&uq->uq_pi_contested);
413 uq->uq_inherited_pri = PRI_MAX;
414 return (uq);
415 }
416
417 void
418 umtxq_free(struct umtx_q *uq)
419 {
420 MPASS(uq->uq_spare_queue != NULL);
421 free(uq->uq_spare_queue, M_UMTX);
422 free(uq, M_UMTX);
423 }
424
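/* Hash the key's address pair into one of the UMTX_CHAINS buckets. */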
425 static inline void
426 umtxq_hash(struct umtx_key *key)
427 {
428 unsigned n = (uintptr_t)key->info.both.a + key->info.both.b;
429 key->hash = ((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS;
430 }
431
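/*
 * Return the chain for a key; keys with type <= TYPE_SEM use the second
 * chain array so they do not share chains with the remaining types.
 */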
432 static inline struct umtxq_chain *
433 umtxq_getchain(struct umtx_key *key)
434 {
435 if (key->type <= TYPE_SEM)
436 return (&umtxq_chains[1][key->hash]);
437 return (&umtxq_chains[0][key->hash]);
438 }
439
440 /*
441 * Lock a chain.
442 */
443 static inline void
444 umtxq_lock(struct umtx_key *key)
445 {
446 struct umtxq_chain *uc;
447
448 uc = umtxq_getchain(key);
449 mtx_lock(&uc->uc_lock);
450 }
451
452 /*
453 * Unlock a chain.
454 */
455 static inline void
456 umtxq_unlock(struct umtx_key *key)
457 {
458 struct umtxq_chain *uc;
459
460 uc = umtxq_getchain(key);
461 mtx_unlock(&uc->uc_lock);
462 }
463
464 /*
   465   * Set the chain to the busy state when the following operation
   466   * may block (a kernel mutex cannot be used).
467 */
468 static inline void
469 umtxq_busy(struct umtx_key *key)
470 {
471 struct umtxq_chain *uc;
472
473 uc = umtxq_getchain(key);
474 mtx_assert(&uc->uc_lock, MA_OWNED);
475 if (uc->uc_busy) {
476 #ifdef SMP
477 if (smp_cpus > 1) {
478 int count = BUSY_SPINS;
479 if (count > 0) {
480 umtxq_unlock(key);
481 while (uc->uc_busy && --count > 0)
482 cpu_spinwait();
483 umtxq_lock(key);
484 }
485 }
486 #endif
487 while (uc->uc_busy) {
488 uc->uc_waiters++;
489 msleep(uc, &uc->uc_lock, 0, "umtxqb", 0);
490 uc->uc_waiters--;
491 }
492 }
493 uc->uc_busy = 1;
494 }
495
496 /*
497 * Unbusy a chain.
498 */
499 static inline void
500 umtxq_unbusy(struct umtx_key *key)
501 {
502 struct umtxq_chain *uc;
503
504 uc = umtxq_getchain(key);
505 mtx_assert(&uc->uc_lock, MA_OWNED);
506 KASSERT(uc->uc_busy != 0, ("not busy"));
507 uc->uc_busy = 0;
508 if (uc->uc_waiters)
509 wakeup_one(uc);
510 }
511
512 static inline void
513 umtxq_unbusy_unlocked(struct umtx_key *key)
514 {
515
516 umtxq_lock(key);
517 umtxq_unbusy(key);
518 umtxq_unlock(key);
519 }
520
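/*
 * Look up the per-key wait queue for a key on its (locked) chain,
 * or return NULL if no thread is queued on that key.
 */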
521 static struct umtxq_queue *
522 umtxq_queue_lookup(struct umtx_key *key, int q)
523 {
524 struct umtxq_queue *uh;
525 struct umtxq_chain *uc;
526
527 uc = umtxq_getchain(key);
528 UMTXQ_LOCKED_ASSERT(uc);
529 LIST_FOREACH(uh, &uc->uc_queue[q], link) {
530 if (umtx_key_match(&uh->key, key))
531 return (uh);
532 }
533
534 return (NULL);
535 }
536
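/*
 * Enqueue the thread on the wait queue for its key.  If no queue exists
 * for the key yet, the thread's spare umtxq_queue becomes that queue;
 * otherwise the spare is parked on the chain's spare list.  A spare is
 * handed back to the thread in umtxq_remove_queue().
 */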
537 static inline void
538 umtxq_insert_queue(struct umtx_q *uq, int q)
539 {
540 struct umtxq_queue *uh;
541 struct umtxq_chain *uc;
542
543 uc = umtxq_getchain(&uq->uq_key);
544 UMTXQ_LOCKED_ASSERT(uc);
545 KASSERT((uq->uq_flags & UQF_UMTXQ) == 0, ("umtx_q is already on queue"));
546 uh = umtxq_queue_lookup(&uq->uq_key, q);
547 if (uh != NULL) {
548 LIST_INSERT_HEAD(&uc->uc_spare_queue, uq->uq_spare_queue, link);
549 } else {
550 uh = uq->uq_spare_queue;
551 uh->key = uq->uq_key;
552 LIST_INSERT_HEAD(&uc->uc_queue[q], uh, link);
553 #ifdef UMTX_PROFILING
554 uc->length++;
555 if (uc->length > uc->max_length) {
556 uc->max_length = uc->length;
557 if (uc->max_length > max_length)
558 max_length = uc->max_length;
559 }
560 #endif
561 }
562 uq->uq_spare_queue = NULL;
563
564 TAILQ_INSERT_TAIL(&uh->head, uq, uq_link);
565 uh->length++;
566 uq->uq_flags |= UQF_UMTXQ;
567 uq->uq_cur_queue = uh;
568 return;
569 }
570
571 static inline void
572 umtxq_remove_queue(struct umtx_q *uq, int q)
573 {
574 struct umtxq_chain *uc;
575 struct umtxq_queue *uh;
576
577 uc = umtxq_getchain(&uq->uq_key);
578 UMTXQ_LOCKED_ASSERT(uc);
579 if (uq->uq_flags & UQF_UMTXQ) {
580 uh = uq->uq_cur_queue;
581 TAILQ_REMOVE(&uh->head, uq, uq_link);
582 uh->length--;
583 uq->uq_flags &= ~UQF_UMTXQ;
584 if (TAILQ_EMPTY(&uh->head)) {
585 KASSERT(uh->length == 0,
586 ("inconsistent umtxq_queue length"));
587 #ifdef UMTX_PROFILING
588 uc->length--;
589 #endif
590 LIST_REMOVE(uh, link);
591 } else {
592 uh = LIST_FIRST(&uc->uc_spare_queue);
593 KASSERT(uh != NULL, ("uc_spare_queue is empty"));
594 LIST_REMOVE(uh, link);
595 }
596 uq->uq_spare_queue = uh;
597 uq->uq_cur_queue = NULL;
598 }
599 }
600
601 /*
   602  * Return the number of threads waiting on the key's shared queue.
603 */
604 static int
605 umtxq_count(struct umtx_key *key)
606 {
607 struct umtxq_chain *uc;
608 struct umtxq_queue *uh;
609
610 uc = umtxq_getchain(key);
611 UMTXQ_LOCKED_ASSERT(uc);
612 uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
613 if (uh != NULL)
614 return (uh->length);
615 return (0);
616 }
617
618 /*
   619  * Count the PI waiters on the key's shared queue and return the
   620  * first waiter through *first.
621 */
622 static int
623 umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
624 {
625 struct umtxq_chain *uc;
626 struct umtxq_queue *uh;
627
628 *first = NULL;
629 uc = umtxq_getchain(key);
630 UMTXQ_LOCKED_ASSERT(uc);
631 uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE);
632 if (uh != NULL) {
633 *first = TAILQ_FIRST(&uh->head);
634 return (uh->length);
635 }
636 return (0);
637 }
638
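/*
 * If a suspension check is pending for the thread, return EINTR or
 * ERESTART so that callers' retry loops stop spinning while the
 * process is being stopped.
 */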
639 static int
640 umtxq_check_susp(struct thread *td)
641 {
642 struct proc *p;
643 int error;
644
645 /*
646 * The check for TDF_NEEDSUSPCHK is racy, but it is enough to
647 * eventually break the lockstep loop.
648 */
649 if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
650 return (0);
651 error = 0;
652 p = td->td_proc;
653 PROC_LOCK(p);
654 if (P_SHOULDSTOP(p) ||
655 ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND))) {
656 if (p->p_flag & P_SINGLE_EXIT)
657 error = EINTR;
658 else
659 error = ERESTART;
660 }
661 PROC_UNLOCK(p);
662 return (error);
663 }
664
665 /*
   666  * Wake up threads waiting on a userland object.
667 */
668
669 static int
670 umtxq_signal_queue(struct umtx_key *key, int n_wake, int q)
671 {
672 struct umtxq_chain *uc;
673 struct umtxq_queue *uh;
674 struct umtx_q *uq;
675 int ret;
676
677 ret = 0;
678 uc = umtxq_getchain(key);
679 UMTXQ_LOCKED_ASSERT(uc);
680 uh = umtxq_queue_lookup(key, q);
681 if (uh != NULL) {
682 while ((uq = TAILQ_FIRST(&uh->head)) != NULL) {
683 umtxq_remove_queue(uq, q);
684 wakeup(uq);
685 if (++ret >= n_wake)
686 return (ret);
687 }
688 }
689 return (ret);
690 }
691
692
693 /*
694 * Wake up specified thread.
695 */
696 static inline void
697 umtxq_signal_thread(struct umtx_q *uq)
698 {
699 struct umtxq_chain *uc;
700
701 uc = umtxq_getchain(&uq->uq_key);
702 UMTXQ_LOCKED_ASSERT(uc);
703 umtxq_remove(uq);
704 wakeup(uq);
705 }
706
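/* Convert a timespec into a tick count suitable for msleep(). */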
707 static inline int
708 tstohz(const struct timespec *tsp)
709 {
710 struct timeval tv;
711
712 TIMESPEC_TO_TIMEVAL(&tv, tsp);
713 return tvtohz(&tv);
714 }
715
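/*
 * Initialize an absolute deadline from either a relative or an absolute
 * timespec measured on the given clock.
 */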
716 static void
717 abs_timeout_init(struct abs_timeout *timo, int clockid, int absolute,
718 const struct timespec *timeout)
719 {
720
721 timo->clockid = clockid;
722 if (!absolute) {
723 kern_clock_gettime(curthread, clockid, &timo->end);
724 timo->cur = timo->end;
725 timespecadd(&timo->end, timeout);
726 } else {
727 timo->end = *timeout;
728 kern_clock_gettime(curthread, clockid, &timo->cur);
729 }
730 }
731
732 static void
733 abs_timeout_init2(struct abs_timeout *timo, const struct _umtx_time *umtxtime)
734 {
735
736 abs_timeout_init(timo, umtxtime->_clockid,
737 (umtxtime->_flags & UMTX_ABSTIME) != 0,
738 &umtxtime->_timeout);
739 }
740
741 static inline void
742 abs_timeout_update(struct abs_timeout *timo)
743 {
744 kern_clock_gettime(curthread, timo->clockid, &timo->cur);
745 }
746
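/*
 * Return the number of ticks remaining until the deadline, or -1 if it
 * has already expired.
 */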
747 static int
748 abs_timeout_gethz(struct abs_timeout *timo)
749 {
750 struct timespec tts;
751
752 if (timespeccmp(&timo->end, &timo->cur, <=))
753 return (-1);
754 tts = timo->end;
755 timespecsub(&tts, &timo->cur);
756 return (tstohz(&tts));
757 }
758
759 /*
   760  * Put the thread to sleep.  Before each sleep, check whether the
   761  * thread has already been removed from the umtx queue.
762 */
763 static inline int
764 umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct abs_timeout *abstime)
765 {
766 struct umtxq_chain *uc;
767 int error, timo;
768
769 uc = umtxq_getchain(&uq->uq_key);
770 UMTXQ_LOCKED_ASSERT(uc);
771 for (;;) {
772 if (!(uq->uq_flags & UQF_UMTXQ))
773 return (0);
774 if (abstime != NULL) {
775 timo = abs_timeout_gethz(abstime);
776 if (timo < 0)
777 return (ETIMEDOUT);
778 } else
779 timo = 0;
780 error = msleep(uq, &uc->uc_lock, PCATCH | PDROP, wmesg, timo);
781 if (error != EWOULDBLOCK) {
782 umtxq_lock(&uq->uq_key);
783 break;
784 }
785 if (abstime != NULL)
786 abs_timeout_update(abstime);
787 umtxq_lock(&uq->uq_key);
788 }
789 return (error);
790 }
791
792 /*
793 * Convert userspace address into unique logical address.
794 */
795 int
796 umtx_key_get(void *addr, int type, int share, struct umtx_key *key)
797 {
798 struct thread *td = curthread;
799 vm_map_t map;
800 vm_map_entry_t entry;
801 vm_pindex_t pindex;
802 vm_prot_t prot;
803 boolean_t wired;
804
805 key->type = type;
806 if (share == THREAD_SHARE) {
807 key->shared = 0;
808 key->info.private.vs = td->td_proc->p_vmspace;
809 key->info.private.addr = (uintptr_t)addr;
810 } else {
811 MPASS(share == PROCESS_SHARE || share == AUTO_SHARE);
812 map = &td->td_proc->p_vmspace->vm_map;
813 if (vm_map_lookup(&map, (vm_offset_t)addr, VM_PROT_WRITE,
814 &entry, &key->info.shared.object, &pindex, &prot,
815 &wired) != KERN_SUCCESS) {
816 return EFAULT;
817 }
818
819 if ((share == PROCESS_SHARE) ||
820 (share == AUTO_SHARE &&
821 VM_INHERIT_SHARE == entry->inheritance)) {
822 key->shared = 1;
823 key->info.shared.offset = entry->offset + entry->start -
824 (vm_offset_t)addr;
825 vm_object_reference(key->info.shared.object);
826 } else {
827 key->shared = 0;
828 key->info.private.vs = td->td_proc->p_vmspace;
829 key->info.private.addr = (uintptr_t)addr;
830 }
831 vm_map_lookup_done(map, entry);
832 }
833
834 umtxq_hash(key);
835 return (0);
836 }
837
838 /*
839 * Release key.
840 */
841 void
842 umtx_key_release(struct umtx_key *key)
843 {
844 if (key->shared)
845 vm_object_deallocate(key->info.shared.object);
846 }
847
848 /*
849 * Lock a umtx object.
850 */
851 static int
852 do_lock_umtx(struct thread *td, struct umtx *umtx, u_long id,
853 const struct timespec *timeout)
854 {
855 struct abs_timeout timo;
856 struct umtx_q *uq;
857 u_long owner;
858 u_long old;
859 int error = 0;
860
861 uq = td->td_umtxq;
862 if (timeout != NULL)
863 abs_timeout_init(&timo, CLOCK_REALTIME, 0, timeout);
864
865 /*
   866  	 * Care must be exercised when dealing with the umtx structure; any
   867  	 * access to it can fault.
868 */
869 for (;;) {
870 /*
871 * Try the uncontested case. This should be done in userland.
872 */
873 owner = casuword(&umtx->u_owner, UMTX_UNOWNED, id);
874
875 /* The acquire succeeded. */
876 if (owner == UMTX_UNOWNED)
877 return (0);
878
879 /* The address was invalid. */
880 if (owner == -1)
881 return (EFAULT);
882
883 /* If no one owns it but it is contested try to acquire it. */
884 if (owner == UMTX_CONTESTED) {
885 owner = casuword(&umtx->u_owner,
886 UMTX_CONTESTED, id | UMTX_CONTESTED);
887
888 if (owner == UMTX_CONTESTED)
889 return (0);
890
891 /* The address was invalid. */
892 if (owner == -1)
893 return (EFAULT);
894
895 error = umtxq_check_susp(td);
896 if (error != 0)
897 break;
898
899 /* If this failed the lock has changed, restart. */
900 continue;
901 }
902
903 /*
904 * If we caught a signal, we have retried and now
905 * exit immediately.
906 */
907 if (error != 0)
908 break;
909
910 if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK,
911 AUTO_SHARE, &uq->uq_key)) != 0)
912 return (error);
913
914 umtxq_lock(&uq->uq_key);
915 umtxq_busy(&uq->uq_key);
916 umtxq_insert(uq);
917 umtxq_unbusy(&uq->uq_key);
918 umtxq_unlock(&uq->uq_key);
919
920 /*
921 * Set the contested bit so that a release in user space
922 * knows to use the system call for unlock. If this fails
   923  		 * either someone else has acquired the lock or it has been
924 * released.
925 */
926 old = casuword(&umtx->u_owner, owner, owner | UMTX_CONTESTED);
927
928 /* The address was invalid. */
929 if (old == -1) {
930 umtxq_lock(&uq->uq_key);
931 umtxq_remove(uq);
932 umtxq_unlock(&uq->uq_key);
933 umtx_key_release(&uq->uq_key);
934 return (EFAULT);
935 }
936
937 /*
   938  		 * If we set the contested bit, sleep.  Otherwise the lock changed
   939  		 * and we need to retry, or we lost a race to the thread
940 * unlocking the umtx.
941 */
942 umtxq_lock(&uq->uq_key);
943 if (old == owner)
944 error = umtxq_sleep(uq, "umtx", timeout == NULL ? NULL :
945 &timo);
946 umtxq_remove(uq);
947 umtxq_unlock(&uq->uq_key);
948 umtx_key_release(&uq->uq_key);
949
950 if (error == 0)
951 error = umtxq_check_susp(td);
952 }
953
954 if (timeout == NULL) {
955 /* Mutex locking is restarted if it is interrupted. */
956 if (error == EINTR)
957 error = ERESTART;
958 } else {
959 /* Timed-locking is not restarted. */
960 if (error == ERESTART)
961 error = EINTR;
962 }
963 return (error);
964 }
965
966 /*
967 * Unlock a umtx object.
968 */
969 static int
970 do_unlock_umtx(struct thread *td, struct umtx *umtx, u_long id)
971 {
972 struct umtx_key key;
973 u_long owner;
974 u_long old;
975 int error;
976 int count;
977
978 /*
979 * Make sure we own this mtx.
980 */
981 owner = fuword(__DEVOLATILE(u_long *, &umtx->u_owner));
982 if (owner == -1)
983 return (EFAULT);
984
985 if ((owner & ~UMTX_CONTESTED) != id)
986 return (EPERM);
987
988 /* This should be done in userland */
989 if ((owner & UMTX_CONTESTED) == 0) {
990 old = casuword(&umtx->u_owner, owner, UMTX_UNOWNED);
991 if (old == -1)
992 return (EFAULT);
993 if (old == owner)
994 return (0);
995 owner = old;
996 }
997
998 /* We should only ever be in here for contested locks */
999 if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK, AUTO_SHARE,
1000 &key)) != 0)
1001 return (error);
1002
1003 umtxq_lock(&key);
1004 umtxq_busy(&key);
1005 count = umtxq_count(&key);
1006 umtxq_unlock(&key);
1007
1008 /*
1009 * When unlocking the umtx, it must be marked as unowned if
  1010  	 * no more than one thread is waiting for it.
1011 * Otherwise, it must be marked as contested.
1012 */
1013 old = casuword(&umtx->u_owner, owner,
1014 count <= 1 ? UMTX_UNOWNED : UMTX_CONTESTED);
1015 umtxq_lock(&key);
1016 umtxq_signal(&key,1);
1017 umtxq_unbusy(&key);
1018 umtxq_unlock(&key);
1019 umtx_key_release(&key);
1020 if (old == -1)
1021 return (EFAULT);
1022 if (old != owner)
1023 return (EINVAL);
1024 return (0);
1025 }
1026
1027 #ifdef COMPAT_FREEBSD32
1028
1029 /*
1030 * Lock a umtx object.
1031 */
1032 static int
1033 do_lock_umtx32(struct thread *td, uint32_t *m, uint32_t id,
1034 const struct timespec *timeout)
1035 {
1036 struct abs_timeout timo;
1037 struct umtx_q *uq;
1038 uint32_t owner;
1039 uint32_t old;
1040 int error = 0;
1041
1042 uq = td->td_umtxq;
1043
1044 if (timeout != NULL)
1045 abs_timeout_init(&timo, CLOCK_REALTIME, 0, timeout);
1046
1047 /*
  1048  	 * Care must be exercised when dealing with the umtx structure; any
  1049  	 * access to it can fault.
1050 */
1051 for (;;) {
1052 /*
1053 * Try the uncontested case. This should be done in userland.
1054 */
1055 owner = casuword32(m, UMUTEX_UNOWNED, id);
1056
1057 /* The acquire succeeded. */
1058 if (owner == UMUTEX_UNOWNED)
1059 return (0);
1060
1061 /* The address was invalid. */
1062 if (owner == -1)
1063 return (EFAULT);
1064
1065 /* If no one owns it but it is contested try to acquire it. */
1066 if (owner == UMUTEX_CONTESTED) {
1067 owner = casuword32(m,
1068 UMUTEX_CONTESTED, id | UMUTEX_CONTESTED);
1069 if (owner == UMUTEX_CONTESTED)
1070 return (0);
1071
1072 /* The address was invalid. */
1073 if (owner == -1)
1074 return (EFAULT);
1075
1076 error = umtxq_check_susp(td);
1077 if (error != 0)
1078 break;
1079
1080 /* If this failed the lock has changed, restart. */
1081 continue;
1082 }
1083
1084 /*
1085 * If we caught a signal, we have retried and now
1086 * exit immediately.
1087 */
1088 if (error != 0)
1089 return (error);
1090
1091 if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK,
1092 AUTO_SHARE, &uq->uq_key)) != 0)
1093 return (error);
1094
1095 umtxq_lock(&uq->uq_key);
1096 umtxq_busy(&uq->uq_key);
1097 umtxq_insert(uq);
1098 umtxq_unbusy(&uq->uq_key);
1099 umtxq_unlock(&uq->uq_key);
1100
1101 /*
1102 * Set the contested bit so that a release in user space
1103 * knows to use the system call for unlock. If this fails
  1104  		 * either someone else has acquired the lock or it has been
1105 * released.
1106 */
1107 old = casuword32(m, owner, owner | UMUTEX_CONTESTED);
1108
1109 /* The address was invalid. */
1110 if (old == -1) {
1111 umtxq_lock(&uq->uq_key);
1112 umtxq_remove(uq);
1113 umtxq_unlock(&uq->uq_key);
1114 umtx_key_release(&uq->uq_key);
1115 return (EFAULT);
1116 }
1117
1118 /*
  1119  		 * If we set the contested bit, sleep.  Otherwise the lock changed
  1120  		 * and we need to retry, or we lost a race to the thread
1121 * unlocking the umtx.
1122 */
1123 umtxq_lock(&uq->uq_key);
1124 if (old == owner)
1125 error = umtxq_sleep(uq, "umtx", timeout == NULL ?
1126 NULL : &timo);
1127 umtxq_remove(uq);
1128 umtxq_unlock(&uq->uq_key);
1129 umtx_key_release(&uq->uq_key);
1130
1131 if (error == 0)
1132 error = umtxq_check_susp(td);
1133 }
1134
1135 if (timeout == NULL) {
1136 /* Mutex locking is restarted if it is interrupted. */
1137 if (error == EINTR)
1138 error = ERESTART;
1139 } else {
1140 /* Timed-locking is not restarted. */
1141 if (error == ERESTART)
1142 error = EINTR;
1143 }
1144 return (error);
1145 }
1146
1147 /*
1148 * Unlock a umtx object.
1149 */
1150 static int
1151 do_unlock_umtx32(struct thread *td, uint32_t *m, uint32_t id)
1152 {
1153 struct umtx_key key;
1154 uint32_t owner;
1155 uint32_t old;
1156 int error;
1157 int count;
1158
1159 /*
1160 * Make sure we own this mtx.
1161 */
1162 owner = fuword32(m);
1163 if (owner == -1)
1164 return (EFAULT);
1165
1166 if ((owner & ~UMUTEX_CONTESTED) != id)
1167 return (EPERM);
1168
1169 /* This should be done in userland */
1170 if ((owner & UMUTEX_CONTESTED) == 0) {
1171 old = casuword32(m, owner, UMUTEX_UNOWNED);
1172 if (old == -1)
1173 return (EFAULT);
1174 if (old == owner)
1175 return (0);
1176 owner = old;
1177 }
1178
1179 /* We should only ever be in here for contested locks */
1180 if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK, AUTO_SHARE,
1181 &key)) != 0)
1182 return (error);
1183
1184 umtxq_lock(&key);
1185 umtxq_busy(&key);
1186 count = umtxq_count(&key);
1187 umtxq_unlock(&key);
1188
1189 /*
1190 * When unlocking the umtx, it must be marked as unowned if
  1191  	 * no more than one thread is waiting for it.
1192 * Otherwise, it must be marked as contested.
1193 */
1194 old = casuword32(m, owner,
1195 count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
1196 umtxq_lock(&key);
1197 umtxq_signal(&key,1);
1198 umtxq_unbusy(&key);
1199 umtxq_unlock(&key);
1200 umtx_key_release(&key);
1201 if (old == -1)
1202 return (EFAULT);
1203 if (old != owner)
1204 return (EINVAL);
1205 return (0);
1206 }
1207 #endif
1208
1209 /*
1210 * Fetch and compare value, sleep on the address if value is not changed.
1211 */
1212 static int
1213 do_wait(struct thread *td, void *addr, u_long id,
1214 struct _umtx_time *timeout, int compat32, int is_private)
1215 {
1216 struct abs_timeout timo;
1217 struct umtx_q *uq;
1218 u_long tmp;
1219 uint32_t tmp32;
1220 int error = 0;
1221
1222 uq = td->td_umtxq;
1223 if ((error = umtx_key_get(addr, TYPE_SIMPLE_WAIT,
1224 is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0)
1225 return (error);
1226
1227 if (timeout != NULL)
1228 abs_timeout_init2(&timo, timeout);
1229
1230 umtxq_lock(&uq->uq_key);
1231 umtxq_insert(uq);
1232 umtxq_unlock(&uq->uq_key);
1233 if (compat32 == 0) {
1234 error = fueword(addr, &tmp);
1235 if (error != 0)
1236 error = EFAULT;
1237 } else {
1238 error = fueword32(addr, &tmp32);
1239 if (error == 0)
1240 tmp = tmp32;
1241 else
1242 error = EFAULT;
1243 }
1244 umtxq_lock(&uq->uq_key);
1245 if (error == 0) {
1246 if (tmp == id)
1247 error = umtxq_sleep(uq, "uwait", timeout == NULL ?
1248 NULL : &timo);
1249 if ((uq->uq_flags & UQF_UMTXQ) == 0)
1250 error = 0;
1251 else
1252 umtxq_remove(uq);
1253 } else if ((uq->uq_flags & UQF_UMTXQ) != 0) {
1254 umtxq_remove(uq);
1255 }
1256 umtxq_unlock(&uq->uq_key);
1257 umtx_key_release(&uq->uq_key);
1258 if (error == ERESTART)
1259 error = EINTR;
1260 return (error);
1261 }
1262
1263 /*
1264 * Wake up threads sleeping on the specified address.
1265 */
1266 int
1267 kern_umtx_wake(struct thread *td, void *uaddr, int n_wake, int is_private)
1268 {
1269 struct umtx_key key;
1270 int ret;
1271
1272 if ((ret = umtx_key_get(uaddr, TYPE_SIMPLE_WAIT,
1273 is_private ? THREAD_SHARE : AUTO_SHARE, &key)) != 0)
1274 return (ret);
1275 umtxq_lock(&key);
1276 ret = umtxq_signal(&key, n_wake);
1277 umtxq_unlock(&key);
1278 umtx_key_release(&key);
1279 return (0);
1280 }
1281
1282 /*
1283 * Lock PTHREAD_PRIO_NONE protocol POSIX mutex.
1284 */
1285 static int
1286 do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags,
1287 struct _umtx_time *timeout, int mode)
1288 {
1289 struct abs_timeout timo;
1290 struct umtx_q *uq;
1291 uint32_t owner, old, id;
1292 int error, rv;
1293
1294 id = td->td_tid;
1295 uq = td->td_umtxq;
1296 error = 0;
1297 if (timeout != NULL)
1298 abs_timeout_init2(&timo, timeout);
1299
1300 /*
  1301  	 * Care must be exercised when dealing with the umtx structure; any
  1302  	 * access to it can fault.
1303 */
1304 for (;;) {
1305 rv = fueword32(&m->m_owner, &owner);
1306 if (rv == -1)
1307 return (EFAULT);
1308 if (mode == _UMUTEX_WAIT) {
1309 if (owner == UMUTEX_UNOWNED || owner == UMUTEX_CONTESTED)
1310 return (0);
1311 } else {
1312 /*
1313 * Try the uncontested case. This should be done in userland.
1314 */
1315 rv = casueword32(&m->m_owner, UMUTEX_UNOWNED,
1316 &owner, id);
1317 /* The address was invalid. */
1318 if (rv == -1)
1319 return (EFAULT);
1320
1321 /* The acquire succeeded. */
1322 if (owner == UMUTEX_UNOWNED)
1323 return (0);
1324
1325 /* If no one owns it but it is contested try to acquire it. */
1326 if (owner == UMUTEX_CONTESTED) {
1327 rv = casueword32(&m->m_owner,
1328 UMUTEX_CONTESTED, &owner,
1329 id | UMUTEX_CONTESTED);
1330 /* The address was invalid. */
1331 if (rv == -1)
1332 return (EFAULT);
1333
1334 if (owner == UMUTEX_CONTESTED)
1335 return (0);
1336
1337 rv = umtxq_check_susp(td);
1338 if (rv != 0)
1339 return (rv);
1340
1341 /* If this failed the lock has changed, restart. */
1342 continue;
1343 }
1344 }
1345
1346 if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
1347 (owner & ~UMUTEX_CONTESTED) == id)
1348 return (EDEADLK);
1349
1350 if (mode == _UMUTEX_TRY)
1351 return (EBUSY);
1352
1353 /*
1354 * If we caught a signal, we have retried and now
1355 * exit immediately.
1356 */
1357 if (error != 0)
1358 return (error);
1359
1360 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX,
1361 GET_SHARE(flags), &uq->uq_key)) != 0)
1362 return (error);
1363
1364 umtxq_lock(&uq->uq_key);
1365 umtxq_busy(&uq->uq_key);
1366 umtxq_insert(uq);
1367 umtxq_unlock(&uq->uq_key);
1368
1369 /*
1370 * Set the contested bit so that a release in user space
1371 * knows to use the system call for unlock. If this fails
  1372  		 * either someone else has acquired the lock or it has been
1373 * released.
1374 */
1375 rv = casueword32(&m->m_owner, owner, &old,
1376 owner | UMUTEX_CONTESTED);
1377
1378 /* The address was invalid. */
1379 if (rv == -1) {
1380 umtxq_lock(&uq->uq_key);
1381 umtxq_remove(uq);
1382 umtxq_unbusy(&uq->uq_key);
1383 umtxq_unlock(&uq->uq_key);
1384 umtx_key_release(&uq->uq_key);
1385 return (EFAULT);
1386 }
1387
1388 /*
  1389  		 * If we set the contested bit, sleep.  Otherwise the lock changed
  1390  		 * and we need to retry, or we lost a race to the thread
1391 * unlocking the umtx.
1392 */
1393 umtxq_lock(&uq->uq_key);
1394 umtxq_unbusy(&uq->uq_key);
1395 if (old == owner)
1396 error = umtxq_sleep(uq, "umtxn", timeout == NULL ?
1397 NULL : &timo);
1398 umtxq_remove(uq);
1399 umtxq_unlock(&uq->uq_key);
1400 umtx_key_release(&uq->uq_key);
1401
1402 if (error == 0)
1403 error = umtxq_check_susp(td);
1404 }
1405
1406 return (0);
1407 }
1408
1409 /*
1410 * Unlock PTHREAD_PRIO_NONE protocol POSIX mutex.
1411 */
1412 static int
1413 do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags)
1414 {
1415 struct umtx_key key;
1416 uint32_t owner, old, id;
1417 int error;
1418 int count;
1419
1420 id = td->td_tid;
1421 /*
1422 * Make sure we own this mtx.
1423 */
1424 error = fueword32(&m->m_owner, &owner);
1425 if (error == -1)
1426 return (EFAULT);
1427
1428 if ((owner & ~UMUTEX_CONTESTED) != id)
1429 return (EPERM);
1430
1431 if ((owner & UMUTEX_CONTESTED) == 0) {
1432 error = casueword32(&m->m_owner, owner, &old, UMUTEX_UNOWNED);
1433 if (error == -1)
1434 return (EFAULT);
1435 if (old == owner)
1436 return (0);
1437 owner = old;
1438 }
1439
1440 /* We should only ever be in here for contested locks */
1441 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
1442 &key)) != 0)
1443 return (error);
1444
1445 umtxq_lock(&key);
1446 umtxq_busy(&key);
1447 count = umtxq_count(&key);
1448 umtxq_unlock(&key);
1449
1450 /*
1451 * When unlocking the umtx, it must be marked as unowned if
  1452  	 * no more than one thread is waiting for it.
1453 * Otherwise, it must be marked as contested.
1454 */
1455 error = casueword32(&m->m_owner, owner, &old,
1456 count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
1457 umtxq_lock(&key);
1458 umtxq_signal(&key,1);
1459 umtxq_unbusy(&key);
1460 umtxq_unlock(&key);
1461 umtx_key_release(&key);
1462 if (error == -1)
1463 return (EFAULT);
1464 if (old != owner)
1465 return (EINVAL);
1466 return (0);
1467 }
1468
1469 /*
  1470  * Check if the mutex is available and wake up a waiter;
  1471  * this is only for simple (non-PI, non-PP) mutexes.
1472 */
1473 static int
1474 do_wake_umutex(struct thread *td, struct umutex *m)
1475 {
1476 struct umtx_key key;
1477 uint32_t owner;
1478 uint32_t flags;
1479 int error;
1480 int count;
1481
1482 error = fueword32(&m->m_owner, &owner);
1483 if (error == -1)
1484 return (EFAULT);
1485
1486 if ((owner & ~UMUTEX_CONTESTED) != 0)
1487 return (0);
1488
1489 error = fueword32(&m->m_flags, &flags);
1490 if (error == -1)
1491 return (EFAULT);
1492
1493 /* We should only ever be in here for contested locks */
1494 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags),
1495 &key)) != 0)
1496 return (error);
1497
1498 umtxq_lock(&key);
1499 umtxq_busy(&key);
1500 count = umtxq_count(&key);
1501 umtxq_unlock(&key);
1502
1503 if (count <= 1) {
1504 error = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
1505 UMUTEX_UNOWNED);
1506 if (error == -1)
1507 error = EFAULT;
1508 }
1509
1510 umtxq_lock(&key);
1511 if (error == 0 && count != 0 && (owner & ~UMUTEX_CONTESTED) == 0)
1512 umtxq_signal(&key, 1);
1513 umtxq_unbusy(&key);
1514 umtxq_unlock(&key);
1515 umtx_key_release(&key);
1516 return (error);
1517 }
1518
1519 /*
  1520  * Check if the mutex has waiters and try to repair the contention bit.
1521 */
1522 static int
1523 do_wake2_umutex(struct thread *td, struct umutex *m, uint32_t flags)
1524 {
1525 struct umtx_key key;
1526 uint32_t owner, old;
1527 int type;
1528 int error;
1529 int count;
1530
1531 switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
1532 case 0:
1533 type = TYPE_NORMAL_UMUTEX;
1534 break;
1535 case UMUTEX_PRIO_INHERIT:
1536 type = TYPE_PI_UMUTEX;
1537 break;
1538 case UMUTEX_PRIO_PROTECT:
1539 type = TYPE_PP_UMUTEX;
1540 break;
1541 default:
1542 return (EINVAL);
1543 }
1544 if ((error = umtx_key_get(m, type, GET_SHARE(flags),
1545 &key)) != 0)
1546 return (error);
1547
1548 owner = 0;
1549 umtxq_lock(&key);
1550 umtxq_busy(&key);
1551 count = umtxq_count(&key);
1552 umtxq_unlock(&key);
1553 /*
  1554  	 * Only repair the contention bit if there is a waiter; this means the
  1555  	 * mutex is still being referenced by userland code.  Otherwise, don't
  1556  	 * update any memory.
1557 */
1558 if (count > 1) {
1559 error = fueword32(&m->m_owner, &owner);
1560 if (error == -1)
1561 error = EFAULT;
1562 while (error == 0 && (owner & UMUTEX_CONTESTED) == 0) {
1563 error = casueword32(&m->m_owner, owner, &old,
1564 owner | UMUTEX_CONTESTED);
1565 if (error == -1) {
1566 error = EFAULT;
1567 break;
1568 }
1569 if (old == owner)
1570 break;
1571 owner = old;
1572 error = umtxq_check_susp(td);
1573 if (error != 0)
1574 break;
1575 }
1576 } else if (count == 1) {
1577 error = fueword32(&m->m_owner, &owner);
1578 if (error == -1)
1579 error = EFAULT;
1580 while (error == 0 && (owner & ~UMUTEX_CONTESTED) != 0 &&
1581 (owner & UMUTEX_CONTESTED) == 0) {
1582 error = casueword32(&m->m_owner, owner, &old,
1583 owner | UMUTEX_CONTESTED);
1584 if (error == -1) {
1585 error = EFAULT;
1586 break;
1587 }
1588 if (old == owner)
1589 break;
1590 owner = old;
1591 error = umtxq_check_susp(td);
1592 if (error != 0)
1593 break;
1594 }
1595 }
1596 umtxq_lock(&key);
1597 if (error == EFAULT) {
1598 umtxq_signal(&key, INT_MAX);
1599 } else if (count != 0 && (owner & ~UMUTEX_CONTESTED) == 0)
1600 umtxq_signal(&key, 1);
1601 umtxq_unbusy(&key);
1602 umtxq_unlock(&key);
1603 umtx_key_release(&key);
1604 return (error);
1605 }
1606
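/*
 * Allocate and free umtx_pi structures; the number of live structures is
 * exported through the umtx_pi_allocated sysctl.
 */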
1607 static inline struct umtx_pi *
1608 umtx_pi_alloc(int flags)
1609 {
1610 struct umtx_pi *pi;
1611
1612 pi = uma_zalloc(umtx_pi_zone, M_ZERO | flags);
1613 TAILQ_INIT(&pi->pi_blocked);
1614 atomic_add_int(&umtx_pi_allocated, 1);
1615 return (pi);
1616 }
1617
1618 static inline void
1619 umtx_pi_free(struct umtx_pi *pi)
1620 {
1621 uma_zfree(umtx_pi_zone, pi);
1622 atomic_add_int(&umtx_pi_allocated, -1);
1623 }
1624
1625 /*
  1626  * Adjust the thread's position on the PI mutex's blocked list after
  1627  * its priority has been changed.
1628 */
1629 static int
1630 umtx_pi_adjust_thread(struct umtx_pi *pi, struct thread *td)
1631 {
1632 struct umtx_q *uq, *uq1, *uq2;
1633 struct thread *td1;
1634
1635 mtx_assert(&umtx_lock, MA_OWNED);
1636 if (pi == NULL)
1637 return (0);
1638
1639 uq = td->td_umtxq;
1640
1641 /*
1642 * Check if the thread needs to be moved on the blocked chain.
1643 * It needs to be moved if either its priority is lower than
1644 * the previous thread or higher than the next thread.
1645 */
1646 uq1 = TAILQ_PREV(uq, umtxq_head, uq_lockq);
1647 uq2 = TAILQ_NEXT(uq, uq_lockq);
1648 if ((uq1 != NULL && UPRI(td) < UPRI(uq1->uq_thread)) ||
1649 (uq2 != NULL && UPRI(td) > UPRI(uq2->uq_thread))) {
1650 /*
1651 * Remove thread from blocked chain and determine where
1652 * it should be moved to.
1653 */
1654 TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
1655 TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
1656 td1 = uq1->uq_thread;
1657 MPASS(td1->td_proc->p_magic == P_MAGIC);
1658 if (UPRI(td1) > UPRI(td))
1659 break;
1660 }
1661
1662 if (uq1 == NULL)
1663 TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
1664 else
1665 TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
1666 }
1667 return (1);
1668 }
1669
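/*
 * Return the PI mutex that the owner of the given PI mutex is itself
 * blocked on, if any; used for lock-graph cycle detection.
 */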
1670 static struct umtx_pi *
1671 umtx_pi_next(struct umtx_pi *pi)
1672 {
1673 struct umtx_q *uq_owner;
1674
1675 if (pi->pi_owner == NULL)
1676 return (NULL);
1677 uq_owner = pi->pi_owner->td_umtxq;
1678 if (uq_owner == NULL)
1679 return (NULL);
1680 return (uq_owner->uq_pi_blocked);
1681 }
1682
1683 /*
1684 * Floyd's Cycle-Finding Algorithm.
1685 */
1686 static bool
1687 umtx_pi_check_loop(struct umtx_pi *pi)
1688 {
1689 struct umtx_pi *pi1; /* fast iterator */
1690
1691 mtx_assert(&umtx_lock, MA_OWNED);
1692 if (pi == NULL)
1693 return (false);
1694 pi1 = pi;
1695 for (;;) {
1696 pi = umtx_pi_next(pi);
1697 if (pi == NULL)
1698 break;
1699 pi1 = umtx_pi_next(pi1);
1700 if (pi1 == NULL)
1701 break;
1702 pi1 = umtx_pi_next(pi1);
1703 if (pi1 == NULL)
1704 break;
1705 if (pi == pi1)
1706 return (true);
1707 }
1708 return (false);
1709 }
1710
1711 /*
1712 * Propagate priority when a thread is blocked on POSIX
1713 * PI mutex.
1714 */
1715 static void
1716 umtx_propagate_priority(struct thread *td)
1717 {
1718 struct umtx_q *uq;
1719 struct umtx_pi *pi;
1720 int pri;
1721
1722 mtx_assert(&umtx_lock, MA_OWNED);
1723 pri = UPRI(td);
1724 uq = td->td_umtxq;
1725 pi = uq->uq_pi_blocked;
1726 if (pi == NULL)
1727 return;
1728 if (umtx_pi_check_loop(pi))
1729 return;
1730
1731 for (;;) {
1732 td = pi->pi_owner;
1733 if (td == NULL || td == curthread)
1734 return;
1735
1736 MPASS(td->td_proc != NULL);
1737 MPASS(td->td_proc->p_magic == P_MAGIC);
1738
1739 thread_lock(td);
1740 if (td->td_lend_user_pri > pri)
1741 sched_lend_user_prio(td, pri);
1742 else {
1743 thread_unlock(td);
1744 break;
1745 }
1746 thread_unlock(td);
1747
1748 /*
1749 * Pick up the lock that td is blocked on.
1750 */
1751 uq = td->td_umtxq;
1752 pi = uq->uq_pi_blocked;
1753 if (pi == NULL)
1754 break;
1755 /* Resort td on the list if needed. */
1756 umtx_pi_adjust_thread(pi, td);
1757 }
1758 }
1759
1760 /*
1761 * Unpropagate priority for a PI mutex when a thread blocked on
  1762  * it is interrupted by a signal or resumed by another thread.
1763 */
1764 static void
1765 umtx_repropagate_priority(struct umtx_pi *pi)
1766 {
1767 struct umtx_q *uq, *uq_owner;
1768 struct umtx_pi *pi2;
1769 int pri;
1770
1771 mtx_assert(&umtx_lock, MA_OWNED);
1772
1773 if (umtx_pi_check_loop(pi))
1774 return;
1775 while (pi != NULL && pi->pi_owner != NULL) {
1776 pri = PRI_MAX;
1777 uq_owner = pi->pi_owner->td_umtxq;
1778
1779 TAILQ_FOREACH(pi2, &uq_owner->uq_pi_contested, pi_link) {
1780 uq = TAILQ_FIRST(&pi2->pi_blocked);
1781 if (uq != NULL) {
1782 if (pri > UPRI(uq->uq_thread))
1783 pri = UPRI(uq->uq_thread);
1784 }
1785 }
1786
1787 if (pri > uq_owner->uq_inherited_pri)
1788 pri = uq_owner->uq_inherited_pri;
1789 thread_lock(pi->pi_owner);
1790 sched_lend_user_prio(pi->pi_owner, pri);
1791 thread_unlock(pi->pi_owner);
1792 if ((pi = uq_owner->uq_pi_blocked) != NULL)
1793 umtx_pi_adjust_thread(pi, uq_owner->uq_thread);
1794 }
1795 }
1796
1797 /*
  1798  * Assign an owner to a PI mutex and insert it into the owner's contested list.
1799 */
1800 static void
1801 umtx_pi_setowner(struct umtx_pi *pi, struct thread *owner)
1802 {
1803 struct umtx_q *uq_owner;
1804
1805 uq_owner = owner->td_umtxq;
1806 mtx_assert(&umtx_lock, MA_OWNED);
1807 if (pi->pi_owner != NULL)
  1808  		panic("pi_owner != NULL");
1809 pi->pi_owner = owner;
1810 TAILQ_INSERT_TAIL(&uq_owner->uq_pi_contested, pi, pi_link);
1811 }
1812
1813
1814 /*
1815 * Disown a PI mutex, and remove it from the owned list.
1816 */
1817 static void
1818 umtx_pi_disown(struct umtx_pi *pi)
1819 {
1820
1821 mtx_assert(&umtx_lock, MA_OWNED);
1822 TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested, pi, pi_link);
1823 pi->pi_owner = NULL;
1824 }
1825
1826 /*
1827 * Claim ownership of a PI mutex.
1828 */
1829 static int
1830 umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
1831 {
1832 struct umtx_q *uq, *uq_owner;
1833
1834 uq_owner = owner->td_umtxq;
1835 mtx_lock(&umtx_lock);
1836 if (pi->pi_owner == owner) {
1837 mtx_unlock(&umtx_lock);
1838 return (0);
1839 }
1840
1841 if (pi->pi_owner != NULL) {
1842 /*
  1843  		 * userland may have already messed up the mutex, sigh.
1844 */
1845 mtx_unlock(&umtx_lock);
1846 return (EPERM);
1847 }
1848 umtx_pi_setowner(pi, owner);
1849 uq = TAILQ_FIRST(&pi->pi_blocked);
1850 if (uq != NULL) {
1851 int pri;
1852
1853 pri = UPRI(uq->uq_thread);
1854 thread_lock(owner);
1855 if (pri < UPRI(owner))
1856 sched_lend_user_prio(owner, pri);
1857 thread_unlock(owner);
1858 }
1859 mtx_unlock(&umtx_lock);
1860 return (0);
1861 }
1862
1863 /*
  1864  * Adjust a thread's position in the blocked list of the PI mutex it is
  1865  * blocked on; this may trigger another round of priority propagation.
1866 */
1867 void
1868 umtx_pi_adjust(struct thread *td, u_char oldpri)
1869 {
1870 struct umtx_q *uq;
1871 struct umtx_pi *pi;
1872
1873 uq = td->td_umtxq;
1874 mtx_lock(&umtx_lock);
1875 /*
1876 * Pick up the lock that td is blocked on.
1877 */
1878 pi = uq->uq_pi_blocked;
1879 if (pi != NULL) {
1880 umtx_pi_adjust_thread(pi, td);
1881 umtx_repropagate_priority(pi);
1882 }
1883 mtx_unlock(&umtx_lock);
1884 }
1885
1886 /*
1887 * Sleep on a PI mutex.
1888 */
1889 static int
1890 umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi,
1891 uint32_t owner, const char *wmesg, struct abs_timeout *timo)
1892 {
1893 struct umtxq_chain *uc;
1894 struct thread *td, *td1;
1895 struct umtx_q *uq1;
1896 int pri;
1897 int error = 0;
1898
1899 td = uq->uq_thread;
1900 KASSERT(td == curthread, ("inconsistent uq_thread"));
1901 uc = umtxq_getchain(&uq->uq_key);
1902 UMTXQ_LOCKED_ASSERT(uc);
1903 KASSERT(uc->uc_busy != 0, ("umtx chain is not busy"));
1904 umtxq_insert(uq);
1905 mtx_lock(&umtx_lock);
1906 if (pi->pi_owner == NULL) {
1907 mtx_unlock(&umtx_lock);
1908 /* XXX Only look up thread in current process. */
1909 td1 = tdfind(owner, curproc->p_pid);
1910 mtx_lock(&umtx_lock);
1911 if (td1 != NULL) {
1912 if (pi->pi_owner == NULL)
1913 umtx_pi_setowner(pi, td1);
1914 PROC_UNLOCK(td1->td_proc);
1915 }
1916 }
1917
1918 TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
1919 pri = UPRI(uq1->uq_thread);
1920 if (pri > UPRI(td))
1921 break;
1922 }
1923
1924 if (uq1 != NULL)
1925 TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
1926 else
1927 TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
1928
1929 uq->uq_pi_blocked = pi;
1930 thread_lock(td);
1931 td->td_flags |= TDF_UPIBLOCKED;
1932 thread_unlock(td);
1933 umtx_propagate_priority(td);
1934 mtx_unlock(&umtx_lock);
1935 umtxq_unbusy(&uq->uq_key);
1936
1937 error = umtxq_sleep(uq, wmesg, timo);
1938 umtxq_remove(uq);
1939
1940 mtx_lock(&umtx_lock);
1941 uq->uq_pi_blocked = NULL;
1942 thread_lock(td);
1943 td->td_flags &= ~TDF_UPIBLOCKED;
1944 thread_unlock(td);
1945 TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
1946 umtx_repropagate_priority(pi);
1947 mtx_unlock(&umtx_lock);
1948 umtxq_unlock(&uq->uq_key);
1949
1950 return (error);
1951 }
1952
1953 /*
1954 * Add reference count for a PI mutex.
1955 */
1956 static void
1957 umtx_pi_ref(struct umtx_pi *pi)
1958 {
1959 struct umtxq_chain *uc;
1960
1961 uc = umtxq_getchain(&pi->pi_key);
1962 UMTXQ_LOCKED_ASSERT(uc);
1963 pi->pi_refcount++;
1964 }
1965
1966 /*
  1967  * Decrease the reference count for a PI mutex; when the counter
  1968  * reaches zero, its memory is freed.
1969 */
1970 static void
1971 umtx_pi_unref(struct umtx_pi *pi)
1972 {
1973 struct umtxq_chain *uc;
1974
1975 uc = umtxq_getchain(&pi->pi_key);
1976 UMTXQ_LOCKED_ASSERT(uc);
1977 KASSERT(pi->pi_refcount > 0, ("invalid reference count"));
1978 if (--pi->pi_refcount == 0) {
1979 mtx_lock(&umtx_lock);
1980 if (pi->pi_owner != NULL) {
1981 TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested,
1982 pi, pi_link);
1983 pi->pi_owner = NULL;
1984 }
1985 KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
1986 ("blocked queue not empty"));
1987 mtx_unlock(&umtx_lock);
1988 TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);
1989 umtx_pi_free(pi);
1990 }
1991 }
1992
1993 /*
1994 * Find a PI mutex in hash table.
1995 */
1996 static struct umtx_pi *
1997 umtx_pi_lookup(struct umtx_key *key)
1998 {
1999 struct umtxq_chain *uc;
2000 struct umtx_pi *pi;
2001
2002 uc = umtxq_getchain(key);
2003 UMTXQ_LOCKED_ASSERT(uc);
2004
2005 TAILQ_FOREACH(pi, &uc->uc_pi_list, pi_hashlink) {
2006 if (umtx_key_match(&pi->pi_key, key)) {
2007 return (pi);
2008 }
2009 }
2010 return (NULL);
2011 }
2012
2013 /*
2014 * Insert a PI mutex into hash table.
2015 */
2016 static inline void
2017 umtx_pi_insert(struct umtx_pi *pi)
2018 {
2019 struct umtxq_chain *uc;
2020
2021 uc = umtxq_getchain(&pi->pi_key);
2022 UMTXQ_LOCKED_ASSERT(uc);
2023 TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink);
2024 }
2025
2026 /*
2027 * Lock a PI mutex.
2028 */
2029 static int
2030 do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags,
2031 struct _umtx_time *timeout, int try)
2032 {
2033 struct abs_timeout timo;
2034 struct umtx_q *uq;
2035 struct umtx_pi *pi, *new_pi;
2036 uint32_t id, owner, old;
2037 int error, rv;
2038
2039 id = td->td_tid;
2040 uq = td->td_umtxq;
2041
2042 if ((error = umtx_key_get(m, TYPE_PI_UMUTEX, GET_SHARE(flags),
2043 &uq->uq_key)) != 0)
2044 return (error);
2045
2046 if (timeout != NULL)
2047 abs_timeout_init2(&timo, timeout);
2048
2049 umtxq_lock(&uq->uq_key);
2050 pi = umtx_pi_lookup(&uq->uq_key);
2051 if (pi == NULL) {
2052 new_pi = umtx_pi_alloc(M_NOWAIT);
2053 if (new_pi == NULL) {
2054 umtxq_unlock(&uq->uq_key);
2055 new_pi = umtx_pi_alloc(M_WAITOK);
2056 umtxq_lock(&uq->uq_key);
2057 pi = umtx_pi_lookup(&uq->uq_key);
2058 if (pi != NULL) {
2059 umtx_pi_free(new_pi);
2060 new_pi = NULL;
2061 }
2062 }
2063 if (new_pi != NULL) {
2064 new_pi->pi_key = uq->uq_key;
2065 umtx_pi_insert(new_pi);
2066 pi = new_pi;
2067 }
2068 }
2069 umtx_pi_ref(pi);
2070 umtxq_unlock(&uq->uq_key);
2071
2072 /*
  2073  	 * Care must be exercised when dealing with the umtx structure; any
  2074  	 * access to it can fault.
2075 */
2076 for (;;) {
2077 /*
2078 * Try the uncontested case. This should be done in userland.
2079 */
2080 rv = casueword32(&m->m_owner, UMUTEX_UNOWNED, &owner, id);
2081 /* The address was invalid. */
2082 if (rv == -1) {
2083 error = EFAULT;
2084 break;
2085 }
2086
2087 /* The acquire succeeded. */
2088 if (owner == UMUTEX_UNOWNED) {
2089 error = 0;
2090 break;
2091 }
2092
2093 /* If no one owns it but it is contested try to acquire it. */
2094 if (owner == UMUTEX_CONTESTED) {
2095 rv = casueword32(&m->m_owner,
2096 UMUTEX_CONTESTED, &owner, id | UMUTEX_CONTESTED);
2097 /* The address was invalid. */
2098 if (rv == -1) {
2099 error = EFAULT;
2100 break;
2101 }
2102
2103 if (owner == UMUTEX_CONTESTED) {
2104 umtxq_lock(&uq->uq_key);
2105 umtxq_busy(&uq->uq_key);
2106 error = umtx_pi_claim(pi, td);
2107 umtxq_unbusy(&uq->uq_key);
2108 umtxq_unlock(&uq->uq_key);
2109 if (error != 0) {
2110 /*
2111 * Since we're going to return an
2112 * error, restore the m_owner to its
2113 * previous, unowned state to avoid
2114 * compounding the problem.
2115 */
2116 (void)casuword32(&m->m_owner,
2117 id | UMUTEX_CONTESTED,
2118 UMUTEX_CONTESTED);
2119 }
2120 break;
2121 }
2122
2123 error = umtxq_check_susp(td);
2124 if (error != 0)
2125 break;
2126
2127 /* If this failed the lock has changed, restart. */
2128 continue;
2129 }
2130
2131 if ((owner & ~UMUTEX_CONTESTED) == id) {
2132 error = EDEADLK;
2133 break;
2134 }
2135
2136 if (try != 0) {
2137 error = EBUSY;
2138 break;
2139 }
2140
2141 /*
2142 * If we caught a signal, we have retried and now
2143 * exit immediately.
2144 */
2145 if (error != 0)
2146 break;
2147
2148 umtxq_lock(&uq->uq_key);
2149 umtxq_busy(&uq->uq_key);
2150 umtxq_unlock(&uq->uq_key);
2151
2152 /*
2153 * Set the contested bit so that a release in user space
2154 * knows to use the system call for unlock. If this fails
  2155  		 * either someone else has acquired the lock or it has been
2156 * released.
2157 */
2158 rv = casueword32(&m->m_owner, owner, &old,
2159 owner | UMUTEX_CONTESTED);
2160
2161 /* The address was invalid. */
2162 if (rv == -1) {
2163 umtxq_unbusy_unlocked(&uq->uq_key);
2164 error = EFAULT;
2165 break;
2166 }
2167
2168 umtxq_lock(&uq->uq_key);
2169 /*
  2170  		 * If we set the contested bit, sleep.  Otherwise the lock changed
  2171  		 * and we need to retry, or we lost a race to the thread
2172 * unlocking the umtx.
2173 */
2174 if (old == owner) {
2175 error = umtxq_sleep_pi(uq, pi, owner & ~UMUTEX_CONTESTED,
2176 "umtxpi", timeout == NULL ? NULL : &timo);
2177 if (error != 0)
2178 continue;
2179 } else {
2180 umtxq_unbusy(&uq->uq_key);
2181 umtxq_unlock(&uq->uq_key);
2182 }
2183
2184 error = umtxq_check_susp(td);
2185 if (error != 0)
2186 break;
2187 }
2188
2189 umtxq_lock(&uq->uq_key);
2190 umtx_pi_unref(pi);
2191 umtxq_unlock(&uq->uq_key);
2192
2193 umtx_key_release(&uq->uq_key);
2194 return (error);
2195 }
2196
2197 /*
2198 * Unlock a PI mutex.
2199 */
2200 static int
2201 do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags)
2202 {
2203 struct umtx_key key;
2204 struct umtx_q *uq_first, *uq_first2, *uq_me;
2205 struct umtx_pi *pi, *pi2;
2206 uint32_t owner, old, id;
2207 int error;
2208 int count;
2209 int pri;
2210
2211 id = td->td_tid;
2212 /*
2213 * Make sure we own this mtx.
2214 */
2215 error = fueword32(&m->m_owner, &owner);
2216 if (error == -1)
2217 return (EFAULT);
2218
2219 if ((owner & ~UMUTEX_CONTESTED) != id)
2220 return (EPERM);
2221
2222 /* This should be done in userland */
2223 if ((owner & UMUTEX_CONTESTED) == 0) {
2224 error = casueword32(&m->m_owner, owner, &old, UMUTEX_UNOWNED);
2225 if (error == -1)
2226 return (EFAULT);
2227 if (old == owner)
2228 return (0);
2229 owner = old;
2230 }
2231
2232 /* We should only ever be in here for contested locks */
2233 if ((error = umtx_key_get(m, TYPE_PI_UMUTEX, GET_SHARE(flags),
2234 &key)) != 0)
2235 return (error);
2236
2237 umtxq_lock(&key);
2238 umtxq_busy(&key);
2239 count = umtxq_count_pi(&key, &uq_first);
2240 if (uq_first != NULL) {
2241 mtx_lock(&umtx_lock);
2242 pi = uq_first->uq_pi_blocked;
2243 KASSERT(pi != NULL, ("pi == NULL?"));
2244 if (pi->pi_owner != curthread) {
2245 mtx_unlock(&umtx_lock);
2246 umtxq_unbusy(&key);
2247 umtxq_unlock(&key);
2248 umtx_key_release(&key);
2249 /* userland messed up the mutex state */
2250 return (EPERM);
2251 }
2252 uq_me = curthread->td_umtxq;
2253 umtx_pi_disown(pi);
2254 /* Get the highest-priority thread that is still sleeping. */
2255 uq_first = TAILQ_FIRST(&pi->pi_blocked);
2256 while (uq_first != NULL &&
2257 (uq_first->uq_flags & UQF_UMTXQ) == 0) {
2258 uq_first = TAILQ_NEXT(uq_first, uq_lockq);
2259 }
2260 pri = PRI_MAX;
2261 TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) {
2262 uq_first2 = TAILQ_FIRST(&pi2->pi_blocked);
2263 if (uq_first2 != NULL) {
2264 if (pri > UPRI(uq_first2->uq_thread))
2265 pri = UPRI(uq_first2->uq_thread);
2266 }
2267 }
2268 thread_lock(curthread);
2269 sched_lend_user_prio(curthread, pri);
2270 thread_unlock(curthread);
2271 mtx_unlock(&umtx_lock);
2272 if (uq_first)
2273 umtxq_signal_thread(uq_first);
2274 } else {
2275 pi = umtx_pi_lookup(&key);
2276 /*
2277 * A umtx_pi can exist if a signal or timeout removed the
2278 * last waiter from the umtxq, but there is still
2279 * a thread in do_lock_pi() holding the umtx_pi.
2280 */
2281 if (pi != NULL) {
2282 /*
2283 * The umtx_pi can be unowned, such as when a thread
2284 * has just entered do_lock_pi(), allocated the
2285 * umtx_pi, and unlocked the umtxq.
2286 * If the current thread owns it, it must disown it.
2287 */
2288 mtx_lock(&umtx_lock);
2289 if (pi->pi_owner == td)
2290 umtx_pi_disown(pi);
2291 mtx_unlock(&umtx_lock);
2292 }
2293 }
2294 umtxq_unlock(&key);
2295
2296 /*
2297 * When unlocking the umtx, it must be marked as unowned if
2298 * at most one thread was waiting for it.
2299 * Otherwise, it must be marked as contested.
2300 */
2301 error = casueword32(&m->m_owner, owner, &old,
2302 count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
2303
2304 umtxq_unbusy_unlocked(&key);
2305 umtx_key_release(&key);
2306 if (error == -1)
2307 return (EFAULT);
2308 if (old != owner)
2309 return (EINVAL);
2310 return (0);
2311 }
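
/*
 * In short, the PI unlock path above, with the chain busied: verify that
 * the caller owns the contested mutex, disown the umtx_pi, wake the
 * highest-priority waiter that is still queued, and recompute the
 * priority lent to the caller from its remaining contested PI mutexes.
 * Only then is m_owner rewritten, to UMUTEX_UNOWNED when at most one
 * waiter was found and to UMUTEX_CONTESTED otherwise, so that the next
 * locker takes the slow path.
 */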
2312
2313 /*
2314 * Lock a PP mutex.
2315 */
2316 static int
2317 do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
2318 struct _umtx_time *timeout, int try)
2319 {
2320 struct abs_timeout timo;
2321 struct umtx_q *uq, *uq2;
2322 struct umtx_pi *pi;
2323 uint32_t ceiling;
2324 uint32_t owner, id;
2325 int error, pri, old_inherited_pri, su, rv;
2326
2327 id = td->td_tid;
2328 uq = td->td_umtxq;
2329 if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),
2330 &uq->uq_key)) != 0)
2331 return (error);
2332
2333 if (timeout != NULL)
2334 abs_timeout_init2(&timo, timeout);
2335
2336 su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
2337 for (;;) {
2338 old_inherited_pri = uq->uq_inherited_pri;
2339 umtxq_lock(&uq->uq_key);
2340 umtxq_busy(&uq->uq_key);
2341 umtxq_unlock(&uq->uq_key);
2342
2343 rv = fueword32(&m->m_ceilings[0], &ceiling);
2344 if (rv == -1) {
2345 error = EFAULT;
2346 goto out;
2347 }
2348 ceiling = RTP_PRIO_MAX - ceiling;
2349 if (ceiling > RTP_PRIO_MAX) {
2350 error = EINVAL;
2351 goto out;
2352 }
2353
2354 mtx_lock(&umtx_lock);
2355 if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
2356 mtx_unlock(&umtx_lock);
2357 error = EINVAL;
2358 goto out;
2359 }
2360 if (su && PRI_MIN_REALTIME + ceiling < uq->uq_inherited_pri) {
2361 uq->uq_inherited_pri = PRI_MIN_REALTIME + ceiling;
2362 thread_lock(td);
2363 if (uq->uq_inherited_pri < UPRI(td))
2364 sched_lend_user_prio(td, uq->uq_inherited_pri);
2365 thread_unlock(td);
2366 }
2367 mtx_unlock(&umtx_lock);
2368
2369 rv = casueword32(&m->m_owner,
2370 UMUTEX_CONTESTED, &owner, id | UMUTEX_CONTESTED);
2371 /* The address was invalid. */
2372 if (rv == -1) {
2373 error = EFAULT;
2374 break;
2375 }
2376
2377 if (owner == UMUTEX_CONTESTED) {
2378 error = 0;
2379 break;
2380 }
2381
2382 if ((flags & UMUTEX_ERROR_CHECK) != 0 &&
2383 (owner & ~UMUTEX_CONTESTED) == id) {
2384 error = EDEADLK;
2385 break;
2386 }
2387
2388 if (try != 0) {
2389 error = EBUSY;
2390 break;
2391 }
2392
2393 /*
2394 * If we caught a signal, we have already retried the lock once
2395 * above and now exit immediately.
2396 */
2397 if (error != 0)
2398 break;
2399
2400 umtxq_lock(&uq->uq_key);
2401 umtxq_insert(uq);
2402 umtxq_unbusy(&uq->uq_key);
2403 error = umtxq_sleep(uq, "umtxpp", timeout == NULL ?
2404 NULL : &timo);
2405 umtxq_remove(uq);
2406 umtxq_unlock(&uq->uq_key);
2407
2408 mtx_lock(&umtx_lock);
2409 uq->uq_inherited_pri = old_inherited_pri;
2410 pri = PRI_MAX;
2411 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2412 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2413 if (uq2 != NULL) {
2414 if (pri > UPRI(uq2->uq_thread))
2415 pri = UPRI(uq2->uq_thread);
2416 }
2417 }
2418 if (pri > uq->uq_inherited_pri)
2419 pri = uq->uq_inherited_pri;
2420 thread_lock(td);
2421 sched_lend_user_prio(td, pri);
2422 thread_unlock(td);
2423 mtx_unlock(&umtx_lock);
2424 }
2425
2426 if (error != 0) {
2427 mtx_lock(&umtx_lock);
2428 uq->uq_inherited_pri = old_inherited_pri;
2429 pri = PRI_MAX;
2430 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2431 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2432 if (uq2 != NULL) {
2433 if (pri > UPRI(uq2->uq_thread))
2434 pri = UPRI(uq2->uq_thread);
2435 }
2436 }
2437 if (pri > uq->uq_inherited_pri)
2438 pri = uq->uq_inherited_pri;
2439 thread_lock(td);
2440 sched_lend_user_prio(td, pri);
2441 thread_unlock(td);
2442 mtx_unlock(&umtx_lock);
2443 }
2444
2445 out:
2446 umtxq_unbusy_unlocked(&uq->uq_key);
2447 umtx_key_release(&uq->uq_key);
2448 return (error);
2449 }
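
/*
 * A PP umutex is kept in the UMUTEX_CONTESTED state even while it is
 * unlocked (see do_unlock_pp() below), so acquisition is always a
 * kernel-side CAS from UMUTEX_CONTESTED to id | UMUTEX_CONTESTED.  The
 * ceiling stored in m_ceilings[0] maps to the kernel priority
 * PRI_MIN_REALTIME + (RTP_PRIO_MAX - ceiling); the attempt fails with
 * EINVAL when the caller's priority is numerically lower (i.e. better)
 * than that ceiling, and callers with PRIV_SCHED_RTPRIO have the ceiling
 * priority lent to them while they hold the lock.  On failure the
 * previous inherited priority is restored.
 */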
2450
2451 /*
2452 * Unlock a PP mutex.
2453 */
2454 static int
2455 do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags)
2456 {
2457 struct umtx_key key;
2458 struct umtx_q *uq, *uq2;
2459 struct umtx_pi *pi;
2460 uint32_t owner, id;
2461 uint32_t rceiling;
2462 int error, pri, new_inherited_pri, su;
2463
2464 id = td->td_tid;
2465 uq = td->td_umtxq;
2466 su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
2467
2468 /*
2469 * Make sure we own this mtx.
2470 */
2471 error = fueword32(&m->m_owner, &owner);
2472 if (error == -1)
2473 return (EFAULT);
2474
2475 if ((owner & ~UMUTEX_CONTESTED) != id)
2476 return (EPERM);
2477
2478 error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t));
2479 if (error != 0)
2480 return (error);
2481
2482 if (rceiling == -1)
2483 new_inherited_pri = PRI_MAX;
2484 else {
2485 rceiling = RTP_PRIO_MAX - rceiling;
2486 if (rceiling > RTP_PRIO_MAX)
2487 return (EINVAL);
2488 new_inherited_pri = PRI_MIN_REALTIME + rceiling;
2489 }
2490
2491 if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),
2492 &key)) != 0)
2493 return (error);
2494 umtxq_lock(&key);
2495 umtxq_busy(&key);
2496 umtxq_unlock(&key);
2497 /*
2498 * For a priority-protected mutex, always set the unlocked state
2499 * to UMUTEX_CONTESTED so that userland always enters the kernel
2500 * to lock the mutex. This is necessary because the thread
2501 * priority has to be adjusted for such a mutex.
2502 */
2503 error = suword32(&m->m_owner, UMUTEX_CONTESTED);
2504
2505 umtxq_lock(&key);
2506 if (error == 0)
2507 umtxq_signal(&key, 1);
2508 umtxq_unbusy(&key);
2509 umtxq_unlock(&key);
2510
2511 if (error == -1)
2512 error = EFAULT;
2513 else {
2514 mtx_lock(&umtx_lock);
2515 if (su != 0)
2516 uq->uq_inherited_pri = new_inherited_pri;
2517 pri = PRI_MAX;
2518 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2519 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2520 if (uq2 != NULL) {
2521 if (pri > UPRI(uq2->uq_thread))
2522 pri = UPRI(uq2->uq_thread);
2523 }
2524 }
2525 if (pri > uq->uq_inherited_pri)
2526 pri = uq->uq_inherited_pri;
2527 thread_lock(td);
2528 sched_lend_user_prio(td, pri);
2529 thread_unlock(td);
2530 mtx_unlock(&umtx_lock);
2531 }
2532 umtx_key_release(&key);
2533 return (error);
2534 }
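
/*
 * Note that the owner word is reset to UMUTEX_CONTESTED rather than
 * UMUTEX_UNOWNED, so the next locker always enters the kernel and the
 * ceiling priority can be applied again.  One waiter is signalled; for
 * callers with PRIV_SCHED_RTPRIO the inherited priority is reset from
 * m_ceilings[1] (-1 meaning none, i.e. PRI_MAX) and the lent priority is
 * then re-derived from the mutexes still held contested.
 */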
2535
2536 static int
2537 do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling,
2538 uint32_t *old_ceiling)
2539 {
2540 struct umtx_q *uq;
2541 uint32_t save_ceiling;
2542 uint32_t owner, id;
2543 uint32_t flags;
2544 int error, rv;
2545
2546 error = fueword32(&m->m_flags, &flags);
2547 if (error == -1)
2548 return (EFAULT);
2549 if ((flags & UMUTEX_PRIO_PROTECT) == 0)
2550 return (EINVAL);
2551 if (ceiling > RTP_PRIO_MAX)
2552 return (EINVAL);
2553 id = td->td_tid;
2554 uq = td->td_umtxq;
2555 if ((error = umtx_key_get(m, TYPE_PP_UMUTEX, GET_SHARE(flags),
2556 &uq->uq_key)) != 0)
2557 return (error);
2558 for (;;) {
2559 umtxq_lock(&uq->uq_key);
2560 umtxq_busy(&uq->uq_key);
2561 umtxq_unlock(&uq->uq_key);
2562
2563 rv = fueword32(&m->m_ceilings[0], &save_ceiling);
2564 if (rv == -1) {
2565 error = EFAULT;
2566 break;
2567 }
2568
2569 rv = casueword32(&m->m_owner,
2570 UMUTEX_CONTESTED, &owner, id | UMUTEX_CONTESTED);
2571 if (rv == -1) {
2572 error = EFAULT;
2573 break;
2574 }
2575
2576 if (owner == UMUTEX_CONTESTED) {
2577 suword32(&m->m_ceilings[0], ceiling);
2578 suword32(&m->m_owner, UMUTEX_CONTESTED);
2579 error = 0;
2580 break;
2581 }
2582
2583 if ((owner & ~UMUTEX_CONTESTED) == id) {
2584 suword32(&m->m_ceilings[0], ceiling);
2585 error = 0;
2586 break;
2587 }
2588
2589 /*
2590 * If we caught a signal, we have already retried once
2591 * above and now exit immediately.
2592 */
2593 if (error != 0)
2594 break;
2595
2596 /*
2597 * The mutex is owned by another thread; sleep until it is
2598 * released or we are interrupted, and then retry the
2599 * acquisition above.
2600 */
2601 umtxq_lock(&uq->uq_key);
2602 umtxq_insert(uq);
2603 umtxq_unbusy(&uq->uq_key);
2604 error = umtxq_sleep(uq, "umtxpp", NULL);
2605 umtxq_remove(uq);
2606 umtxq_unlock(&uq->uq_key);
2607 }
2608 umtxq_lock(&uq->uq_key);
2609 if (error == 0)
2610 umtxq_signal(&uq->uq_key, INT_MAX);
2611 umtxq_unbusy(&uq->uq_key);
2612 umtxq_unlock(&uq->uq_key);
2613 umtx_key_release(&uq->uq_key);
2614 if (error == 0 && old_ceiling != NULL)
2615 suword32(old_ceiling, save_ceiling);
2616 return (error);
2617 }
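
/*
 * Changing the ceiling requires holding the mutex: the loop above either
 * takes the (always contested) lock with a CAS, or proceeds directly when
 * the caller already owns it, and otherwise sleeps until the lock is
 * released.  On success every waiter is woken so that it re-evaluates the
 * new ceiling, and the previous ceiling is copied out through old_ceiling
 * when requested.
 */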
2618
2619 /*
2620 * Lock a userland POSIX mutex.
2621 */
2622 static int
2623 do_lock_umutex(struct thread *td, struct umutex *m,
2624 struct _umtx_time *timeout, int mode)
2625 {
2626 uint32_t flags;
2627 int error;
2628
2629 error = fueword32(&m->m_flags, &flags);
2630 if (error == -1)
2631 return (EFAULT);
2632
2633 switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
2634 case 0:
2635 error = do_lock_normal(td, m, flags, timeout, mode);
2636 break;
2637 case UMUTEX_PRIO_INHERIT:
2638 error = do_lock_pi(td, m, flags, timeout, mode);
2639 break;
2640 case UMUTEX_PRIO_PROTECT:
2641 error = do_lock_pp(td, m, flags, timeout, mode);
2642 break;
2643 default:
2644 return (EINVAL);
2645 }
2646 if (timeout == NULL) {
2647 if (error == EINTR && mode != _UMUTEX_WAIT)
2648 error = ERESTART;
2649 } else {
2650 /* Timed-locking is not restarted. */
2651 if (error == ERESTART)
2652 error = EINTR;
2653 }
2654 return (error);
2655 }
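
/*
 * The locking protocol is selected by the UMUTEX_PRIO_INHERIT and
 * UMUTEX_PRIO_PROTECT bits of m_flags; plain mutexes go through
 * do_lock_normal().  Untimed requests interrupted by a signal are
 * transparently restarted (ERESTART), except in _UMUTEX_WAIT mode, while
 * timed requests report EINTR instead so that the caller can recompute
 * its timeout.
 */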
2656
2657 /*
2658 * Unlock a userland POSIX mutex.
2659 */
2660 static int
2661 do_unlock_umutex(struct thread *td, struct umutex *m)
2662 {
2663 uint32_t flags;
2664 int error;
2665
2666 error = fueword32(&m->m_flags, &flags);
2667 if (error == -1)
2668 return (EFAULT);
2669
2670 switch(flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
2671 case 0:
2672 return (do_unlock_normal(td, m, flags));
2673 case UMUTEX_PRIO_INHERIT:
2674 return (do_unlock_pi(td, m, flags));
2675 case UMUTEX_PRIO_PROTECT:
2676 return (do_unlock_pp(td, m, flags));
2677 }
2678
2679 return (EINVAL);
2680 }
2681
2682 static int
2683 do_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m,
2684 struct timespec *timeout, u_long wflags)
2685 {
2686 struct abs_timeout timo;
2687 struct umtx_q *uq;
2688 uint32_t flags, clockid, hasw;
2689 int error;
2690
2691 uq = td->td_umtxq;
2692 error = fueword32(&cv->c_flags, &flags);
2693 if (error == -1)
2694 return (EFAULT);
2695 error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &uq->uq_key);
2696 if (error != 0)
2697 return (error);
2698
2699 if ((wflags & CVWAIT_CLOCKID) != 0) {
2700 error = fueword32(&cv->c_clockid, &clockid);
2701 if (error == -1) {
2702 umtx_key_release(&uq->uq_key);
2703 return (EFAULT);
2704 }
2705 if (clockid < CLOCK_REALTIME ||
2706 clockid >= CLOCK_THREAD_CPUTIME_ID) {
2707 /* Only predefined HW clock ids will work here. */
2708 umtx_key_release(&uq->uq_key);
2709 return (EINVAL);
2710 }
2711 } else {
2712 clockid = CLOCK_REALTIME;
2713 }
2714
2715 umtxq_lock(&uq->uq_key);
2716 umtxq_busy(&uq->uq_key);
2717 umtxq_insert(uq);
2718 umtxq_unlock(&uq->uq_key);
2719
2720 /*
2721 * Set c_has_waiters to 1 before releasing the user mutex, but
2722 * avoid dirtying the cache line when it is not necessary.
2723 */
2724 error = fueword32(&cv->c_has_waiters, &hasw);
2725 if (error == 0 && hasw == 0)
2726 suword32(&cv->c_has_waiters, 1);
2727
2728 umtxq_unbusy_unlocked(&uq->uq_key);
2729
2730 error = do_unlock_umutex(td, m);
2731
2732 if (timeout != NULL)
2733 abs_timeout_init(&timo, clockid, ((wflags & CVWAIT_ABSTIME) != 0),
2734 timeout);
2735
2736 umtxq_lock(&uq->uq_key);
2737 if (error == 0) {
2738 error = umtxq_sleep(uq, "ucond", timeout == NULL ?
2739 NULL : &timo);
2740 }
2741
2742 if ((uq->uq_flags & UQF_UMTXQ) == 0)
2743 error = 0;
2744 else {
2745 /*
2746 * This must be a timeout, an interruption by a signal, or a
2747 * spurious wakeup; clear the c_has_waiters flag when
2748 * necessary.
2749 */
2750 umtxq_busy(&uq->uq_key);
2751 if ((uq->uq_flags & UQF_UMTXQ) != 0) {
2752 int oldlen = uq->uq_cur_queue->length;
2753 umtxq_remove(uq);
2754 if (oldlen == 1) {
2755 umtxq_unlock(&uq->uq_key);
2756 suword32(&cv->c_has_waiters, 0);
2757 umtxq_lock(&uq->uq_key);
2758 }
2759 }
2760 umtxq_unbusy(&uq->uq_key);
2761 if (error == ERESTART)
2762 error = EINTR;
2763 }
2764
2765 umtxq_unlock(&uq->uq_key);
2766 umtx_key_release(&uq->uq_key);
2767 return (error);
2768 }
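
/*
 * The waiter above is queued and c_has_waiters is set while the chain is
 * busied, and only then is the user mutex released with
 * do_unlock_umutex(), so a signal sent by the mutex holder cannot be
 * lost.  If the sleep ends without a wakeup (timeout, signal, or spurious
 * return) the thread removes itself from the queue and, if it was the
 * last waiter, clears c_has_waiters again.
 */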
2769
2770 /*
2771 * Signal a userland condition variable.
2772 */
2773 static int
2774 do_cv_signal(struct thread *td, struct ucond *cv)
2775 {
2776 struct umtx_key key;
2777 int error, cnt, nwake;
2778 uint32_t flags;
2779
2780 error = fueword32(&cv->c_flags, &flags);
2781 if (error == -1)
2782 return (EFAULT);
2783 if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
2784 return (error);
2785 umtxq_lock(&key);
2786 umtxq_busy(&key);
2787 cnt = umtxq_count(&key);
2788 nwake = umtxq_signal(&key, 1);
2789 if (cnt <= nwake) {
2790 umtxq_unlock(&key);
2791 error = suword32(&cv->c_has_waiters, 0);
2792 if (error == -1)
2793 error = EFAULT;
2794 umtxq_lock(&key);
2795 }
2796 umtxq_unbusy(&key);
2797 umtxq_unlock(&key);
2798 umtx_key_release(&key);
2799 return (error);
2800 }
2801
2802 static int
2803 do_cv_broadcast(struct thread *td, struct ucond *cv)
2804 {
2805 struct umtx_key key;
2806 int error;
2807 uint32_t flags;
2808
2809 error = fueword32(&cv->c_flags, &flags);
2810 if (error == -1)
2811 return (EFAULT);
2812 if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
2813 return (error);
2814
2815 umtxq_lock(&key);
2816 umtxq_busy(&key);
2817 umtxq_signal(&key, INT_MAX);
2818 umtxq_unlock(&key);
2819
2820 error = suword32(&cv->c_has_waiters, 0);
2821 if (error == -1)
2822 error = EFAULT;
2823
2824 umtxq_unbusy_unlocked(&key);
2825
2826 umtx_key_release(&key);
2827 return (error);
2828 }
2829
2830 static int
2831 do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag, struct _umtx_time *timeout)
2832 {
2833 struct abs_timeout timo;
2834 struct umtx_q *uq;
2835 uint32_t flags, wrflags;
2836 int32_t state, oldstate;
2837 int32_t blocked_readers;
2838 int error, rv;
2839
2840 uq = td->td_umtxq;
2841 error = fueword32(&rwlock->rw_flags, &flags);
2842 if (error == -1)
2843 return (EFAULT);
2844 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
2845 if (error != 0)
2846 return (error);
2847
2848 if (timeout != NULL)
2849 abs_timeout_init2(&timo, timeout);
2850
2851 wrflags = URWLOCK_WRITE_OWNER;
2852 if (!(fflag & URWLOCK_PREFER_READER) && !(flags & URWLOCK_PREFER_READER))
2853 wrflags |= URWLOCK_WRITE_WAITERS;
2854
2855 for (;;) {
2856 rv = fueword32(&rwlock->rw_state, &state);
2857 if (rv == -1) {
2858 umtx_key_release(&uq->uq_key);
2859 return (EFAULT);
2860 }
2861
2862 /* try to lock it */
2863 while (!(state & wrflags)) {
2864 if (__predict_false(URWLOCK_READER_COUNT(state) == URWLOCK_MAX_READERS)) {
2865 umtx_key_release(&uq->uq_key);
2866 return (EAGAIN);
2867 }
2868 rv = casueword32(&rwlock->rw_state, state,
2869 &oldstate, state + 1);
2870 if (rv == -1) {
2871 umtx_key_release(&uq->uq_key);
2872 return (EFAULT);
2873 }
2874 if (oldstate == state) {
2875 umtx_key_release(&uq->uq_key);
2876 return (0);
2877 }
2878 error = umtxq_check_susp(td);
2879 if (error != 0)
2880 break;
2881 state = oldstate;
2882 }
2883
2884 if (error)
2885 break;
2886
2887 /* grab monitor lock */
2888 umtxq_lock(&uq->uq_key);
2889 umtxq_busy(&uq->uq_key);
2890 umtxq_unlock(&uq->uq_key);
2891
2892 /*
2893 * Re-read the state, in case it changed between the try-lock above
2894 * and the check below.
2895 */
2896 rv = fueword32(&rwlock->rw_state, &state);
2897 if (rv == -1)
2898 error = EFAULT;
2899
2900 /* set read contention bit */
2901 while (error == 0 && (state & wrflags) &&
2902 !(state & URWLOCK_READ_WAITERS)) {
2903 rv = casueword32(&rwlock->rw_state, state,
2904 &oldstate, state | URWLOCK_READ_WAITERS);
2905 if (rv == -1) {
2906 error = EFAULT;
2907 break;
2908 }
2909 if (oldstate == state)
2910 goto sleep;
2911 state = oldstate;
2912 error = umtxq_check_susp(td);
2913 if (error != 0)
2914 break;
2915 }
2916 if (error != 0) {
2917 umtxq_unbusy_unlocked(&uq->uq_key);
2918 break;
2919 }
2920
2921 /* The state changed while we were setting the flags; restart. */
2922 if (!(state & wrflags)) {
2923 umtxq_unbusy_unlocked(&uq->uq_key);
2924 error = umtxq_check_susp(td);
2925 if (error != 0)
2926 break;
2927 continue;
2928 }
2929
2930 sleep:
2931 /* The contention bit is set; before sleeping, increase the read waiter count. */
2932 rv = fueword32(&rwlock->rw_blocked_readers,
2933 &blocked_readers);
2934 if (rv == -1) {
2935 umtxq_unbusy_unlocked(&uq->uq_key);
2936 error = EFAULT;
2937 break;
2938 }
2939 suword32(&rwlock->rw_blocked_readers, blocked_readers+1);
2940
2941 while (state & wrflags) {
2942 umtxq_lock(&uq->uq_key);
2943 umtxq_insert(uq);
2944 umtxq_unbusy(&uq->uq_key);
2945
2946 error = umtxq_sleep(uq, "urdlck", timeout == NULL ?
2947 NULL : &timo);
2948
2949 umtxq_busy(&uq->uq_key);
2950 umtxq_remove(uq);
2951 umtxq_unlock(&uq->uq_key);
2952 if (error)
2953 break;
2954 rv = fueword32(&rwlock->rw_state, &state);
2955 if (rv == -1) {
2956 error = EFAULT;
2957 break;
2958 }
2959 }
2960
2961 /* Decrease the read waiter count, and possibly clear the read contention bit. */
2962 rv = fueword32(&rwlock->rw_blocked_readers,
2963 &blocked_readers);
2964 if (rv == -1) {
2965 umtxq_unbusy_unlocked(&uq->uq_key);
2966 error = EFAULT;
2967 break;
2968 }
2969 suword32(&rwlock->rw_blocked_readers, blocked_readers-1);
2970 if (blocked_readers == 1) {
2971 rv = fueword32(&rwlock->rw_state, &state);
2972 if (rv == -1)
2973 error = EFAULT;
2974 while (error == 0) {
2975 rv = casueword32(&rwlock->rw_state, state,
2976 &oldstate, state & ~URWLOCK_READ_WAITERS);
2977 if (rv == -1) {
2978 error = EFAULT;
2979 break;
2980 }
2981 if (oldstate == state)
2982 break;
2983 state = oldstate;
2984 error = umtxq_check_susp(td);
2985 }
2986 }
2987
2988 umtxq_unbusy_unlocked(&uq->uq_key);
2989 if (error != 0)
2990 break;
2991 }
2992 umtx_key_release(&uq->uq_key);
2993 if (error == ERESTART)
2994 error = EINTR;
2995 return (error);
2996 }
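
/*
 * rw_state packs the reader count in its low bits together with the
 * URWLOCK_WRITE_OWNER, URWLOCK_WRITE_WAITERS and URWLOCK_READ_WAITERS
 * flags.  A reader acquires by CASing in state + 1 while no blocking
 * writer bits are set; on contention it sets URWLOCK_READ_WAITERS, bumps
 * rw_blocked_readers and sleeps, and the last blocked reader to leave
 * clears the waiters bit again.  Unless URWLOCK_PREFER_READER is set,
 * waiting writers also block new readers.
 */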
2997
2998 static int
2999 do_rw_wrlock(struct thread *td, struct urwlock *rwlock, struct _umtx_time *timeout)
3000 {
3001 struct abs_timeout timo;
3002 struct umtx_q *uq;
3003 uint32_t flags;
3004 int32_t state, oldstate;
3005 int32_t blocked_writers;
3006 int32_t blocked_readers;
3007 int error, rv;
3008
3009 uq = td->td_umtxq;
3010 error = fueword32(&rwlock->rw_flags, &flags);
3011 if (error == -1)
3012 return (EFAULT);
3013 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
3014 if (error != 0)
3015 return (error);
3016
3017 if (timeout != NULL)
3018 abs_timeout_init2(&timo, timeout);
3019
3020 blocked_readers = 0;
3021 for (;;) {
3022 rv = fueword32(&rwlock->rw_state, &state);
3023 if (rv == -1) {
3024 umtx_key_release(&uq->uq_key);
3025 return (EFAULT);
3026 }
3027 while (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
3028 rv = casueword32(&rwlock->rw_state, state,
3029 &oldstate, state | URWLOCK_WRITE_OWNER);
3030 if (rv == -1) {
3031 umtx_key_release(&uq->uq_key);
3032 return (EFAULT);
3033 }
3034 if (oldstate == state) {
3035 umtx_key_release(&uq->uq_key);
3036 return (0);
3037 }
3038 state = oldstate;
3039 error = umtxq_check_susp(td);
3040 if (error != 0)
3041 break;
3042 }
3043
3044 if (error) {
3045 if (!(state & (URWLOCK_WRITE_OWNER|URWLOCK_WRITE_WAITERS)) &&
3046 blocked_readers != 0) {
3047 umtxq_lock(&uq->uq_key);
3048 umtxq_busy(&uq->uq_key);
3049 umtxq_signal_queue(&uq->uq_key, INT_MAX, UMTX_SHARED_QUEUE);
3050 umtxq_unbusy(&uq->uq_key);
3051 umtxq_unlock(&uq->uq_key);
3052 }
3053
3054 break;
3055 }
3056
3057 /* grab monitor lock */
3058 umtxq_lock(&uq->uq_key);
3059 umtxq_busy(&uq->uq_key);
3060 umtxq_unlock(&uq->uq_key);
3061
3062 /*
3063 * Re-read the state, in case it changed between the try-lock above
3064 * and the check below.
3065 */
3066 rv = fueword32(&rwlock->rw_state, &state);
3067 if (rv == -1)
3068 error = EFAULT;
3069
3070 while (error == 0 && ((state & URWLOCK_WRITE_OWNER) ||
3071 URWLOCK_READER_COUNT(state) != 0) &&
3072 (state & URWLOCK_WRITE_WAITERS) == 0) {
3073 rv = casueword32(&rwlock->rw_state, state,
3074 &oldstate, state | URWLOCK_WRITE_WAITERS);
3075 if (rv == -1) {
3076 error = EFAULT;
3077 break;
3078 }
3079 if (oldstate == state)
3080 goto sleep;
3081 state = oldstate;
3082 error = umtxq_check_susp(td);
3083 if (error != 0)
3084 break;
3085 }
3086 if (error != 0) {
3087 umtxq_unbusy_unlocked(&uq->uq_key);
3088 break;
3089 }
3090
3091 if (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
3092 umtxq_unbusy_unlocked(&uq->uq_key);
3093 error = umtxq_check_susp(td);
3094 if (error != 0)
3095 break;
3096 continue;
3097 }
3098 sleep:
3099 rv = fueword32(&rwlock->rw_blocked_writers,
3100 &blocked_writers);
3101 if (rv == -1) {
3102 umtxq_unbusy_unlocked(&uq->uq_key);
3103 error = EFAULT;
3104 break;
3105 }
3106 suword32(&rwlock->rw_blocked_writers, blocked_writers+1);
3107
3108 while ((state & URWLOCK_WRITE_OWNER) || URWLOCK_READER_COUNT(state) != 0) {
3109 umtxq_lock(&uq->uq_key);
3110 umtxq_insert_queue(uq, UMTX_EXCLUSIVE_QUEUE);
3111 umtxq_unbusy(&uq->uq_key);
3112
3113 error = umtxq_sleep(uq, "uwrlck", timeout == NULL ?
3114 NULL : &timo);
3115
3116 umtxq_busy(&uq->uq_key);
3117 umtxq_remove_queue(uq, UMTX_EXCLUSIVE_QUEUE);
3118 umtxq_unlock(&uq->uq_key);
3119 if (error)
3120 break;
3121 rv = fueword32(&rwlock->rw_state, &state);
3122 if (rv == -1) {
3123 error = EFAULT;
3124 break;
3125 }
3126 }
3127
3128 rv = fueword32(&rwlock->rw_blocked_writers,
3129 &blocked_writers);
3130 if (rv == -1) {
3131 umtxq_unbusy_unlocked(&uq->uq_key);
3132 error = EFAULT;
3133 break;
3134 }
3135 suword32(&rwlock->rw_blocked_writers, blocked_writers-1);
3136 if (blocked_writers == 1) {
3137 rv = fueword32(&rwlock->rw_state, &state);
3138 if (rv == -1) {
3139 umtxq_unbusy_unlocked(&uq->uq_key);
3140 error = EFAULT;
3141 break;
3142 }
3143 for (;;) {
3144 rv = casueword32(&rwlock->rw_state, state,
3145 &oldstate, state & ~URWLOCK_WRITE_WAITERS);
3146 if (rv == -1) {
3147 error = EFAULT;
3148 break;
3149 }
3150 if (oldstate == state)
3151 break;
3152 state = oldstate;
3153 error = umtxq_check_susp(td);
3154 /*
3155 * We may be leaving the URWLOCK_WRITE_WAITERS
3156 * flag set, but this should not harm
3157 * correctness.
3158 */
3159 if (error != 0)
3160 break;
3161 }
3162 rv = fueword32(&rwlock->rw_blocked_readers,
3163 &blocked_readers);
3164 if (rv == -1) {
3165 umtxq_unbusy_unlocked(&uq->uq_key);
3166 error = EFAULT;
3167 break;
3168 }
3169 } else
3170 blocked_readers = 0;
3171
3172 umtxq_unbusy_unlocked(&uq->uq_key);
3173 }
3174
3175 umtx_key_release(&uq->uq_key);
3176 if (error == ERESTART)
3177 error = EINTR;
3178 return (error);
3179 }
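
/*
 * A writer CASes in URWLOCK_WRITE_OWNER only when there is no owner and
 * the reader count is zero; otherwise it sets URWLOCK_WRITE_WAITERS,
 * bumps rw_blocked_writers and sleeps on the exclusive queue.  The last
 * blocked writer to leave clears URWLOCK_WRITE_WAITERS, and if the
 * attempt is abandoned (signal or timeout) while readers are still
 * blocked, those readers are woken so they can compete for the lock
 * again.
 */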
3180
3181 static int
3182 do_rw_unlock(struct thread *td, struct urwlock *rwlock)
3183 {
3184 struct umtx_q *uq;
3185 uint32_t flags;
3186 int32_t state, oldstate;
3187 int error, rv, q, count;
3188
3189 uq = td->td_umtxq;
3190 error = fueword32(&rwlock->rw_flags, &flags);
3191 if (error == -1)
3192 return (EFAULT);
3193 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key);
3194 if (error != 0)
3195 return (error);
3196
3197 error = fueword32(&rwlock->rw_state, &state);
3198 if (error == -1) {
3199 error = EFAULT;
3200 goto out;
3201 }
3202 if (state & URWLOCK_WRITE_OWNER) {
3203 for (;;) {
3204 rv = casueword32(&rwlock->rw_state, state,
3205 &oldstate, state & ~URWLOCK_WRITE_OWNER);
3206 if (rv == -1) {
3207 error = EFAULT;
3208 goto out;
3209 }
3210 if (oldstate != state) {
3211 state = oldstate;
3212 if (!(oldstate & URWLOCK_WRITE_OWNER)) {
3213 error = EPERM;
3214 goto out;
3215 }
3216 error = umtxq_check_susp(td);
3217 if (error != 0)
3218 goto out;
3219 } else
3220 break;
3221 }
3222 } else if (URWLOCK_READER_COUNT(state) != 0) {
3223 for (;;) {
3224 rv = casueword32(&rwlock->rw_state, state,
3225 &oldstate, state - 1);
3226 if (rv == -1) {
3227 error = EFAULT;
3228 goto out;
3229 }
3230 if (oldstate != state) {
3231 state = oldstate;
3232 if (URWLOCK_READER_COUNT(oldstate) == 0) {
3233 error = EPERM;
3234 goto out;
3235 }
3236 error = umtxq_check_susp(td);
3237 if (error != 0)
3238 goto out;
3239 } else
3240 break;
3241 }
3242 } else {
3243 error = EPERM;
3244 goto out;
3245 }
3246
3247 count = 0;
3248
3249 if (!(flags & URWLOCK_PREFER_READER)) {
3250 if (state & URWLOCK_WRITE_WAITERS) {
3251 count = 1;
3252 q = UMTX_EXCLUSIVE_QUEUE;
3253 } else if (state & URWLOCK_READ_WAITERS) {
3254 count = INT_MAX;
3255 q = UMTX_SHARED_QUEUE;
3256 }
3257 } else {
3258 if (state & URWLOCK_READ_WAITERS) {
3259 count = INT_MAX;
3260 q = UMTX_SHARED_QUEUE;
3261 } else if (state & URWLOCK_WRITE_WAITERS) {
3262 count = 1;
3263 q = UMTX_EXCLUSIVE_QUEUE;
3264 }
3265 }
3266
3267 if (count) {
3268 umtxq_lock(&uq->uq_key);
3269 umtxq_busy(&uq->uq_key);
3270 umtxq_signal_queue(&uq->uq_key, count, q);
3271 umtxq_unbusy(&uq->uq_key);
3272 umtxq_unlock(&uq->uq_key);
3273 }
3274 out:
3275 umtx_key_release(&uq->uq_key);
3276 return (error);
3277 }
3278
3279 static int
3280 do_sem_wait(struct thread *td, struct _usem *sem, struct _umtx_time *timeout)
3281 {
3282 struct abs_timeout timo;
3283 struct umtx_q *uq;
3284 uint32_t flags, count, count1;
3285 int error, rv;
3286
3287 uq = td->td_umtxq;
3288 error = fueword32(&sem->_flags, &flags);
3289 if (error == -1)
3290 return (EFAULT);
3291 error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
3292 if (error != 0)
3293 return (error);
3294
3295 if (timeout != NULL)
3296 abs_timeout_init2(&timo, timeout);
3297
3298 umtxq_lock(&uq->uq_key);
3299 umtxq_busy(&uq->uq_key);
3300 umtxq_insert(uq);
3301 umtxq_unlock(&uq->uq_key);
3302 rv = casueword32(&sem->_has_waiters, 0, &count1, 1);
3303 if (rv == 0)
3304 rv = fueword32(&sem->_count, &count);
3305 if (rv == -1 || count != 0) {
3306 umtxq_lock(&uq->uq_key);
3307 umtxq_unbusy(&uq->uq_key);
3308 umtxq_remove(uq);
3309 umtxq_unlock(&uq->uq_key);
3310 umtx_key_release(&uq->uq_key);
3311 return (rv == -1 ? EFAULT : 0);
3312 }
3313 umtxq_lock(&uq->uq_key);
3314 umtxq_unbusy(&uq->uq_key);
3315
3316 error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);
3317
3318 if ((uq->uq_flags & UQF_UMTXQ) == 0)
3319 error = 0;
3320 else {
3321 umtxq_remove(uq);
3322 /* A relative timeout cannot be restarted. */
3323 if (error == ERESTART && timeout != NULL &&
3324 (timeout->_flags & UMTX_ABSTIME) == 0)
3325 error = EINTR;
3326 }
3327 umtxq_unlock(&uq->uq_key);
3328 umtx_key_release(&uq->uq_key);
3329 return (error);
3330 }
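
/*
 * _has_waiters is set (with a CAS from 0) before _count is re-checked, so
 * a racing do_sem_wake() cannot be missed; if the count is already
 * non-zero the thread returns at once and the actual count manipulation
 * is left to userland.  A relative timeout that is interrupted is
 * reported as EINTR rather than restarted, since restarting it would
 * sleep too long.
 */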
3331
3332 /*
3333 * Wake up a waiter on a userland semaphore.
3334 */
3335 static int
3336 do_sem_wake(struct thread *td, struct _usem *sem)
3337 {
3338 struct umtx_key key;
3339 int error, cnt;
3340 uint32_t flags;
3341
3342 error = fueword32(&sem->_flags, &flags);
3343 if (error == -1)
3344 return (EFAULT);
3345 if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
3346 return (error);
3347 umtxq_lock(&key);
3348 umtxq_busy(&key);
3349 cnt = umtxq_count(&key);
3350 if (cnt > 0) {
3351 umtxq_signal(&key, 1);
3352 /*
3353 * The count is greater than 0, which means the memory is
3354 * still being referenced by user code, so we can safely
3355 * update the _has_waiters flag.
3356 */
3357 if (cnt == 1) {
3358 umtxq_unlock(&key);
3359 error = suword32(&sem->_has_waiters, 0);
3360 umtxq_lock(&key);
3361 if (error == -1)
3362 error = EFAULT;
3363 }
3364 }
3365 umtxq_unbusy(&key);
3366 umtxq_unlock(&key);
3367 umtx_key_release(&key);
3368 return (error);
3369 }
3370
3371 int
3372 sys__umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
3373 /* struct umtx *umtx */
3374 {
3375 return do_lock_umtx(td, uap->umtx, td->td_tid, 0);
3376 }
3377
3378 int
3379 sys__umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
3380 /* struct umtx *umtx */
3381 {
3382 return do_unlock_umtx(td, uap->umtx, td->td_tid);
3383 }
3384
3385 inline int
3386 umtx_copyin_timeout(const void *addr, struct timespec *tsp)
3387 {
3388 int error;
3389
3390 error = copyin(addr, tsp, sizeof(struct timespec));
3391 if (error == 0) {
3392 if (tsp->tv_sec < 0 ||
3393 tsp->tv_nsec >= 1000000000 ||
3394 tsp->tv_nsec < 0)
3395 error = EINVAL;
3396 }
3397 return (error);
3398 }
3399
3400 static inline int
3401 umtx_copyin_umtx_time(const void *addr, size_t size, struct _umtx_time *tp)
3402 {
3403 int error;
3404
3405 if (size <= sizeof(struct timespec)) {
3406 tp->_clockid = CLOCK_REALTIME;
3407 tp->_flags = 0;
3408 error = copyin(addr, &tp->_timeout, sizeof(struct timespec));
3409 } else
3410 error = copyin(addr, tp, sizeof(struct _umtx_time));
3411 if (error != 0)
3412 return (error);
3413 if (tp->_timeout.tv_sec < 0 ||
3414 tp->_timeout.tv_nsec >= 1000000000 || tp->_timeout.tv_nsec < 0)
3415 return (EINVAL);
3416 return (0);
3417 }
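
/*
 * For the timed operations below, the size of the timeout object is
 * passed in uaddr1 and its address in uaddr2, so callers may supply
 * either a bare relative struct timespec or a full struct _umtx_time.
 * A rough userland sketch (illustrative only; "word", "expected" and the
 * filled-in struct timespec "ts" are assumed, with <sys/umtx.h> included):
 *
 *	struct _umtx_time to = { ._timeout = ts, ._flags = UMTX_ABSTIME,
 *	    ._clockid = CLOCK_MONOTONIC };
 *	_umtx_op(&word, UMTX_OP_WAIT_UINT_PRIVATE, expected,
 *	    (void *)sizeof(to), &to);
 */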
3418
3419 static int
3420 __umtx_op_lock_umtx(struct thread *td, struct _umtx_op_args *uap)
3421 {
3422 struct timespec *ts, timeout;
3423 int error;
3424
3425 /* Allow a null timespec (wait forever). */
3426 if (uap->uaddr2 == NULL)
3427 ts = NULL;
3428 else {
3429 error = umtx_copyin_timeout(uap->uaddr2, &timeout);
3430 if (error != 0)
3431 return (error);
3432 ts = &timeout;
3433 }
3434 return (do_lock_umtx(td, uap->obj, uap->val, ts));
3435 }
3436
3437 static int
3438 __umtx_op_unlock_umtx(struct thread *td, struct _umtx_op_args *uap)
3439 {
3440 return (do_unlock_umtx(td, uap->obj, uap->val));
3441 }
3442
3443 static int
3444 __umtx_op_wait(struct thread *td, struct _umtx_op_args *uap)
3445 {
3446 struct _umtx_time timeout, *tm_p;
3447 int error;
3448
3449 if (uap->uaddr2 == NULL)
3450 tm_p = NULL;
3451 else {
3452 error = umtx_copyin_umtx_time(
3453 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3454 if (error != 0)
3455 return (error);
3456 tm_p = &timeout;
3457 }
3458 return do_wait(td, uap->obj, uap->val, tm_p, 0, 0);
3459 }
3460
3461 static int
3462 __umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap)
3463 {
3464 struct _umtx_time timeout, *tm_p;
3465 int error;
3466
3467 if (uap->uaddr2 == NULL)
3468 tm_p = NULL;
3469 else {
3470 error = umtx_copyin_umtx_time(
3471 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3472 if (error != 0)
3473 return (error);
3474 tm_p = &timeout;
3475 }
3476 return do_wait(td, uap->obj, uap->val, tm_p, 1, 0);
3477 }
3478
3479 static int
3480 __umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap)
3481 {
3482 struct _umtx_time *tm_p, timeout;
3483 int error;
3484
3485 if (uap->uaddr2 == NULL)
3486 tm_p = NULL;
3487 else {
3488 error = umtx_copyin_umtx_time(
3489 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3490 if (error != 0)
3491 return (error);
3492 tm_p = &timeout;
3493 }
3494 return do_wait(td, uap->obj, uap->val, tm_p, 1, 1);
3495 }
3496
3497 static int
3498 __umtx_op_wake(struct thread *td, struct _umtx_op_args *uap)
3499 {
3500 return (kern_umtx_wake(td, uap->obj, uap->val, 0));
3501 }
3502
3503 #define BATCH_SIZE 128
3504 static int
3505 __umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap)
3506 {
3507 int count = uap->val;
3508 void *uaddrs[BATCH_SIZE];
3509 char **upp = (char **)uap->obj;
3510 int tocopy;
3511 int error = 0;
3512 int i, pos = 0;
3513
3514 while (count > 0) {
3515 tocopy = count;
3516 if (tocopy > BATCH_SIZE)
3517 tocopy = BATCH_SIZE;
3518 error = copyin(upp+pos, uaddrs, tocopy * sizeof(char *));
3519 if (error != 0)
3520 break;
3521 for (i = 0; i < tocopy; ++i)
3522 kern_umtx_wake(td, uaddrs[i], INT_MAX, 1);
3523 count -= tocopy;
3524 pos += tocopy;
3525 }
3526 return (error);
3527 }
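
/*
 * Userland passes an array of private wait addresses in obj and the
 * number of entries in val; they are copied in batches of BATCH_SIZE and
 * every waiter on each address is woken, which lets a single system call
 * flush many private sleep queues at once.
 */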
3528
3529 static int
3530 __umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap)
3531 {
3532 return (kern_umtx_wake(td, uap->obj, uap->val, 1));
3533 }
3534
3535 static int
3536 __umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap)
3537 {
3538 struct _umtx_time *tm_p, timeout;
3539 int error;
3540
3541 /* Allow a null timespec (wait forever). */
3542 if (uap->uaddr2 == NULL)
3543 tm_p = NULL;
3544 else {
3545 error = umtx_copyin_umtx_time(
3546 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3547 if (error != 0)
3548 return (error);
3549 tm_p = &timeout;
3550 }
3551 return do_lock_umutex(td, uap->obj, tm_p, 0);
3552 }
3553
3554 static int
3555 __umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap)
3556 {
3557 return do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY);
3558 }
3559
3560 static int
3561 __umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap)
3562 {
3563 struct _umtx_time *tm_p, timeout;
3564 int error;
3565
3566 /* Allow a null timespec (wait forever). */
3567 if (uap->uaddr2 == NULL)
3568 tm_p = NULL;
3569 else {
3570 error = umtx_copyin_umtx_time(
3571 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3572 if (error != 0)
3573 return (error);
3574 tm_p = &timeout;
3575 }
3576 return do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT);
3577 }
3578
3579 static int
3580 __umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap)
3581 {
3582 return do_wake_umutex(td, uap->obj);
3583 }
3584
3585 static int
3586 __umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap)
3587 {
3588 return do_unlock_umutex(td, uap->obj);
3589 }
3590
3591 static int
3592 __umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap)
3593 {
3594 return do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1);
3595 }
3596
3597 static int
3598 __umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap)
3599 {
3600 struct timespec *ts, timeout;
3601 int error;
3602
3603 /* Allow a null timespec (wait forever). */
3604 if (uap->uaddr2 == NULL)
3605 ts = NULL;
3606 else {
3607 error = umtx_copyin_timeout(uap->uaddr2, &timeout);
3608 if (error != 0)
3609 return (error);
3610 ts = &timeout;
3611 }
3612 return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
3613 }
3614
3615 static int
3616 __umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap)
3617 {
3618 return do_cv_signal(td, uap->obj);
3619 }
3620
3621 static int
3622 __umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap)
3623 {
3624 return do_cv_broadcast(td, uap->obj);
3625 }
3626
3627 static int
3628 __umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap)
3629 {
3630 struct _umtx_time timeout;
3631 int error;
3632
3633 /* Allow a null timespec (wait forever). */
3634 if (uap->uaddr2 == NULL) {
3635 error = do_rw_rdlock(td, uap->obj, uap->val, 0);
3636 } else {
3637 error = umtx_copyin_umtx_time(uap->uaddr2,
3638 (size_t)uap->uaddr1, &timeout);
3639 if (error != 0)
3640 return (error);
3641 error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
3642 }
3643 return (error);
3644 }
3645
3646 static int
3647 __umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap)
3648 {
3649 struct _umtx_time timeout;
3650 int error;
3651
3652 /* Allow a null timespec (wait forever). */
3653 if (uap->uaddr2 == NULL) {
3654 error = do_rw_wrlock(td, uap->obj, 0);
3655 } else {
3656 error = umtx_copyin_umtx_time(uap->uaddr2,
3657 (size_t)uap->uaddr1, &timeout);
3658 if (error != 0)
3659 return (error);
3660
3661 error = do_rw_wrlock(td, uap->obj, &timeout);
3662 }
3663 return (error);
3664 }
3665
3666 static int
3667 __umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap)
3668 {
3669 return do_rw_unlock(td, uap->obj);
3670 }
3671
3672 static int
3673 __umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap)
3674 {
3675 struct _umtx_time *tm_p, timeout;
3676 int error;
3677
3678 /* Allow a null timespec (wait forever). */
3679 if (uap->uaddr2 == NULL)
3680 tm_p = NULL;
3681 else {
3682 error = umtx_copyin_umtx_time(
3683 uap->uaddr2, (size_t)uap->uaddr1, &timeout);
3684 if (error != 0)
3685 return (error);
3686 tm_p = &timeout;
3687 }
3688 return (do_sem_wait(td, uap->obj, tm_p));
3689 }
3690
3691 static int
3692 __umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap)
3693 {
3694 return do_sem_wake(td, uap->obj);
3695 }
3696
3697 static int
3698 __umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap)
3699 {
3700 return do_wake2_umutex(td, uap->obj, uap->val);
3701 }
3702
3703 typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap);
3704
3705 static _umtx_op_func op_table[] = {
3706 __umtx_op_lock_umtx, /* UMTX_OP_LOCK */
3707 __umtx_op_unlock_umtx, /* UMTX_OP_UNLOCK */
3708 __umtx_op_wait, /* UMTX_OP_WAIT */
3709 __umtx_op_wake, /* UMTX_OP_WAKE */
3710 __umtx_op_trylock_umutex, /* UMTX_OP_MUTEX_TRYLOCK */
3711 __umtx_op_lock_umutex, /* UMTX_OP_MUTEX_LOCK */
3712 __umtx_op_unlock_umutex, /* UMTX_OP_MUTEX_UNLOCK */
3713 __umtx_op_set_ceiling, /* UMTX_OP_SET_CEILING */
3714 __umtx_op_cv_wait, /* UMTX_OP_CV_WAIT*/
3715 __umtx_op_cv_signal, /* UMTX_OP_CV_SIGNAL */
3716 __umtx_op_cv_broadcast, /* UMTX_OP_CV_BROADCAST */
3717 __umtx_op_wait_uint, /* UMTX_OP_WAIT_UINT */
3718 __umtx_op_rw_rdlock, /* UMTX_OP_RW_RDLOCK */
3719 __umtx_op_rw_wrlock, /* UMTX_OP_RW_WRLOCK */
3720 __umtx_op_rw_unlock, /* UMTX_OP_RW_UNLOCK */
3721 __umtx_op_wait_uint_private, /* UMTX_OP_WAIT_UINT_PRIVATE */
3722 __umtx_op_wake_private, /* UMTX_OP_WAKE_PRIVATE */
3723 __umtx_op_wait_umutex, /* UMTX_OP_UMUTEX_WAIT */
3724 __umtx_op_wake_umutex, /* UMTX_OP_UMUTEX_WAKE */
3725 __umtx_op_sem_wait, /* UMTX_OP_SEM_WAIT */
3726 __umtx_op_sem_wake, /* UMTX_OP_SEM_WAKE */
3727 __umtx_op_nwake_private, /* UMTX_OP_NWAKE_PRIVATE */
3728 __umtx_op_wake2_umutex /* UMTX_OP_UMUTEX_WAKE2 */
3729 };
3730
3731 int
3732 sys__umtx_op(struct thread *td, struct _umtx_op_args *uap)
3733 {
3734 if ((unsigned)uap->op < UMTX_OP_MAX)
3735 return (*op_table[uap->op])(td, uap);
3736 return (EINVAL);
3737 }
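
/*
 * All of the operations above are reached through this single system
 * call.  A rough sketch of the intended split between the userland fast
 * path and the kernel slow path for a plain umutex (illustrative only and
 * simplified; real wrappers such as libthr's also handle errors, the
 * priority protocols and spinning; assumes <sys/umtx.h>, <sys/thr.h> and
 * <machine/atomic.h>):
 *
 *	struct umutex mtx = DEFAULT_UMUTEX;
 *	long tid;
 *
 *	thr_self(&tid);
 *	// Lock: try the uncontested case in userland first.
 *	if (!atomic_cmpset_acq_32(&mtx.m_owner, UMUTEX_UNOWNED, (uint32_t)tid))
 *		_umtx_op(&mtx, UMTX_OP_MUTEX_LOCK, 0, NULL, NULL);
 *	// ... critical section ...
 *	// Unlock: enter the kernel once the contested bit is set.
 *	if (!atomic_cmpset_rel_32(&mtx.m_owner, (uint32_t)tid, UMUTEX_UNOWNED))
 *		_umtx_op(&mtx, UMTX_OP_MUTEX_UNLOCK, 0, NULL, NULL);
 */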
3738
3739 #ifdef COMPAT_FREEBSD32
3740 int
3741 freebsd32_umtx_lock(struct thread *td, struct freebsd32_umtx_lock_args *uap)
3742 /* struct umtx *umtx */
3743 {
3744 return (do_lock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid, NULL));
3745 }
3746
3747 int
3748 freebsd32_umtx_unlock(struct thread *td, struct freebsd32_umtx_unlock_args *uap)
3749 /* struct umtx *umtx */
3750 {
3751 return (do_unlock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid));
3752 }
3753
3754 struct timespec32 {
3755 int32_t tv_sec;
3756 int32_t tv_nsec;
3757 };
3758
3759 struct umtx_time32 {
3760 struct timespec32 timeout;
3761 uint32_t flags;
3762 uint32_t clockid;
3763 };
3764
3765 static inline int
3766 umtx_copyin_timeout32(void *addr, struct timespec *tsp)
3767 {
3768 struct timespec32 ts32;
3769 int error;
3770
3771 error = copyin(addr, &ts32, sizeof(struct timespec32));
3772 if (error == 0) {
3773 if (ts32.tv_sec < 0 ||
3774 ts32.tv_nsec >= 1000000000 ||
3775 ts32.tv_nsec < 0)
3776 error = EINVAL;
3777 else {
3778 tsp->tv_sec = ts32.tv_sec;
3779 tsp->tv_nsec = ts32.tv_nsec;
3780 }
3781 }
3782 return (error);
3783 }
3784
3785 static inline int
3786 umtx_copyin_umtx_time32(const void *addr, size_t size, struct _umtx_time *tp)
3787 {
3788 struct umtx_time32 t32;
3789 int error;
3790
3791 t32.clockid = CLOCK_REALTIME;
3792 t32.flags = 0;
3793 if (size <= sizeof(struct timespec32))
3794 error = copyin(addr, &t32.timeout, sizeof(struct timespec32));
3795 else
3796 error = copyin(addr, &t32, sizeof(struct umtx_time32));
3797 if (error != 0)
3798 return (error);
3799 if (t32.timeout.tv_sec < 0 ||
3800 t32.timeout.tv_nsec >= 1000000000 || t32.timeout.tv_nsec < 0)
3801 return (EINVAL);
3802 tp->_timeout.tv_sec = t32.timeout.tv_sec;
3803 tp->_timeout.tv_nsec = t32.timeout.tv_nsec;
3804 tp->_flags = t32.flags;
3805 tp->_clockid = t32.clockid;
3806 return (0);
3807 }
3808
3809 static int
3810 __umtx_op_lock_umtx_compat32(struct thread *td, struct _umtx_op_args *uap)
3811 {
3812 struct timespec *ts, timeout;
3813 int error;
3814
3815 /* Allow a null timespec (wait forever). */
3816 if (uap->uaddr2 == NULL)
3817 ts = NULL;
3818 else {
3819 error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
3820 if (error != 0)
3821 return (error);
3822 ts = &timeout;
3823 }
3824 return (do_lock_umtx32(td, uap->obj, uap->val, ts));
3825 }
3826
3827 static int
3828 __umtx_op_unlock_umtx_compat32(struct thread *td, struct _umtx_op_args *uap)
3829 {
3830 return (do_unlock_umtx32(td, uap->obj, (uint32_t)uap->val));
3831 }
3832
3833 static int
3834 __umtx_op_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
3835 {
3836 struct _umtx_time *tm_p, timeout;
3837 int error;
3838
3839 if (uap->uaddr2 == NULL)
3840 tm_p = NULL;
3841 else {
3842 error = umtx_copyin_umtx_time32(uap->uaddr2,
3843 (size_t)uap->uaddr1, &timeout);
3844 if (error != 0)
3845 return (error);
3846 tm_p = &timeout;
3847 }
3848 return do_wait(td, uap->obj, uap->val, tm_p, 1, 0);
3849 }
3850
3851 static int
3852 __umtx_op_lock_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
3853 {
3854 struct _umtx_time *tm_p, timeout;
3855 int error;
3856
3857 /* Allow a null timespec (wait forever). */
3858 if (uap->uaddr2 == NULL)
3859 tm_p = NULL;
3860 else {
3861 error = umtx_copyin_umtx_time(uap->uaddr2,
3862 (size_t)uap->uaddr1, &timeout);
3863 if (error != 0)
3864 return (error);
3865 tm_p = &timeout;
3866 }
3867 return do_lock_umutex(td, uap->obj, tm_p, 0);
3868 }
3869
3870 static int
3871 __umtx_op_wait_umutex_compat32(struct thread *td, struct _umtx_op_args *uap)
3872 {
3873 struct _umtx_time *tm_p, timeout;
3874 int error;
3875
3876 /* Allow a null timespec (wait forever). */
3877 if (uap->uaddr2 == NULL)
3878 tm_p = NULL;
3879 else {
3880 error = umtx_copyin_umtx_time32(uap->uaddr2,
3881 (size_t)uap->uaddr1, &timeout);
3882 if (error != 0)
3883 return (error);
3884 tm_p = &timeout;
3885 }
3886 return do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT);
3887 }
3888
3889 static int
3890 __umtx_op_cv_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
3891 {
3892 struct timespec *ts, timeout;
3893 int error;
3894
3895 /* Allow a null timespec (wait forever). */
3896 if (uap->uaddr2 == NULL)
3897 ts = NULL;
3898 else {
3899 error = umtx_copyin_timeout32(uap->uaddr2, &timeout);
3900 if (error != 0)
3901 return (error);
3902 ts = &timeout;
3903 }
3904 return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val));
3905 }
3906
3907 static int
3908 __umtx_op_rw_rdlock_compat32(struct thread *td, struct _umtx_op_args *uap)
3909 {
3910 struct _umtx_time timeout;
3911 int error;
3912
3913 /* Allow a null timespec (wait forever). */
3914 if (uap->uaddr2 == NULL) {
3915 error = do_rw_rdlock(td, uap->obj, uap->val, 0);
3916 } else {
3917 error = umtx_copyin_umtx_time32(uap->uaddr2,
3918 (size_t)uap->uaddr1, &timeout);
3919 if (error != 0)
3920 return (error);
3921 error = do_rw_rdlock(td, uap->obj, uap->val, &timeout);
3922 }
3923 return (error);
3924 }
3925
3926 static int
3927 __umtx_op_rw_wrlock_compat32(struct thread *td, struct _umtx_op_args *uap)
3928 {
3929 struct _umtx_time timeout;
3930 int error;
3931
3932 /* Allow a null timespec (wait forever). */
3933 if (uap->uaddr2 == NULL) {
3934 error = do_rw_wrlock(td, uap->obj, 0);
3935 } else {
3936 error = umtx_copyin_umtx_time32(uap->uaddr2,
3937 (size_t)uap->uaddr1, &timeout);
3938 if (error != 0)
3939 return (error);
3940 error = do_rw_wrlock(td, uap->obj, &timeout);
3941 }
3942 return (error);
3943 }
3944
3945 static int
3946 __umtx_op_wait_uint_private_compat32(struct thread *td, struct _umtx_op_args *uap)
3947 {
3948 struct _umtx_time *tm_p, timeout;
3949 int error;
3950
3951 if (uap->uaddr2 == NULL)
3952 tm_p = NULL;
3953 else {
3954 error = umtx_copyin_umtx_time32(
3955 uap->uaddr2, (size_t)uap->uaddr1,&timeout);
3956 if (error != 0)
3957 return (error);
3958 tm_p = &timeout;
3959 }
3960 return do_wait(td, uap->obj, uap->val, tm_p, 1, 1);
3961 }
3962
3963 static int
3964 __umtx_op_sem_wait_compat32(struct thread *td, struct _umtx_op_args *uap)
3965 {
3966 struct _umtx_time *tm_p, timeout;
3967 int error;
3968
3969 /* Allow a null timespec (wait forever). */
3970 if (uap->uaddr2 == NULL)
3971 tm_p = NULL;
3972 else {
3973 error = umtx_copyin_umtx_time32(uap->uaddr2,
3974 (size_t)uap->uaddr1, &timeout);
3975 if (error != 0)
3976 return (error);
3977 tm_p = &timeout;
3978 }
3979 return (do_sem_wait(td, uap->obj, tm_p));
3980 }
3981
3982 static int
3983 __umtx_op_nwake_private32(struct thread *td, struct _umtx_op_args *uap)
3984 {
3985 int count = uap->val;
3986 uint32_t uaddrs[BATCH_SIZE];
3987 uint32_t **upp = (uint32_t **)uap->obj;
3988 int tocopy;
3989 int error = 0;
3990 int i, pos = 0;
3991
3992 while (count > 0) {
3993 tocopy = count;
3994 if (tocopy > BATCH_SIZE)
3995 tocopy = BATCH_SIZE;
3996 error = copyin(upp+pos, uaddrs, tocopy * sizeof(uint32_t));
3997 if (error != 0)
3998 break;
3999 for (i = 0; i < tocopy; ++i)
4000 kern_umtx_wake(td, (void *)(intptr_t)uaddrs[i],
4001 INT_MAX, 1);
4002 count -= tocopy;
4003 pos += tocopy;
4004 }
4005 return (error);
4006 }
4007
4008 static _umtx_op_func op_table_compat32[] = {
4009 __umtx_op_lock_umtx_compat32, /* UMTX_OP_LOCK */
4010 __umtx_op_unlock_umtx_compat32, /* UMTX_OP_UNLOCK */
4011 __umtx_op_wait_compat32, /* UMTX_OP_WAIT */
4012 __umtx_op_wake, /* UMTX_OP_WAKE */
4013 __umtx_op_trylock_umutex, /* UMTX_OP_MUTEX_LOCK */
4014 __umtx_op_lock_umutex_compat32, /* UMTX_OP_MUTEX_TRYLOCK */
4015 __umtx_op_unlock_umutex, /* UMTX_OP_MUTEX_UNLOCK */
4016 __umtx_op_set_ceiling, /* UMTX_OP_SET_CEILING */
4017 __umtx_op_cv_wait_compat32, /* UMTX_OP_CV_WAIT*/
4018 __umtx_op_cv_signal, /* UMTX_OP_CV_SIGNAL */
4019 __umtx_op_cv_broadcast, /* UMTX_OP_CV_BROADCAST */
4020 __umtx_op_wait_compat32, /* UMTX_OP_WAIT_UINT */
4021 __umtx_op_rw_rdlock_compat32, /* UMTX_OP_RW_RDLOCK */
4022 __umtx_op_rw_wrlock_compat32, /* UMTX_OP_RW_WRLOCK */
4023 __umtx_op_rw_unlock, /* UMTX_OP_RW_UNLOCK */
4024 __umtx_op_wait_uint_private_compat32, /* UMTX_OP_WAIT_UINT_PRIVATE */
4025 __umtx_op_wake_private, /* UMTX_OP_WAKE_PRIVATE */
4026 __umtx_op_wait_umutex_compat32, /* UMTX_OP_UMUTEX_WAIT */
4027 __umtx_op_wake_umutex, /* UMTX_OP_UMUTEX_WAKE */
4028 __umtx_op_sem_wait_compat32, /* UMTX_OP_SEM_WAIT */
4029 __umtx_op_sem_wake, /* UMTX_OP_SEM_WAKE */
4030 __umtx_op_nwake_private32, /* UMTX_OP_NWAKE_PRIVATE */
4031 __umtx_op_wake2_umutex /* UMTX_OP_UMUTEX_WAKE2 */
4032 };
4033
4034 int
4035 freebsd32_umtx_op(struct thread *td, struct freebsd32_umtx_op_args *uap)
4036 {
4037 if ((unsigned)uap->op < UMTX_OP_MAX)
4038 return (*op_table_compat32[uap->op])(td,
4039 (struct _umtx_op_args *)uap);
4040 return (EINVAL);
4041 }
4042 #endif
4043
4044 void
4045 umtx_thread_init(struct thread *td)
4046 {
4047 td->td_umtxq = umtxq_alloc();
4048 td->td_umtxq->uq_thread = td;
4049 }
4050
4051 void
4052 umtx_thread_fini(struct thread *td)
4053 {
4054 umtxq_free(td->td_umtxq);
4055 }
4056
4057 /*
4058 * Called when a new thread is created, e.g. by fork().
4059 */
4060 void
4061 umtx_thread_alloc(struct thread *td)
4062 {
4063 struct umtx_q *uq;
4064
4065 uq = td->td_umtxq;
4066 uq->uq_inherited_pri = PRI_MAX;
4067
4068 KASSERT(uq->uq_flags == 0, ("uq_flags != 0"));
4069 KASSERT(uq->uq_thread == td, ("uq_thread != td"));
4070 KASSERT(uq->uq_pi_blocked == NULL, ("uq_pi_blocked != NULL"));
4071 KASSERT(TAILQ_EMPTY(&uq->uq_pi_contested), ("uq_pi_contested is not empty"));
4072 }
4073
4074 /*
4075 * exec() hook.
4076 */
4077 static void
4078 umtx_exec_hook(void *arg __unused, struct proc *p __unused,
4079 struct image_params *imgp __unused)
4080 {
4081 umtx_thread_cleanup(curthread);
4082 }
4083
4084 /*
4085 * thread_exit() hook.
4086 */
4087 void
4088 umtx_thread_exit(struct thread *td)
4089 {
4090 umtx_thread_cleanup(td);
4091 }
4092
4093 /*
4094 * clean up umtx data.
4095 */
4096 static void
4097 umtx_thread_cleanup(struct thread *td)
4098 {
4099 struct umtx_q *uq;
4100 struct umtx_pi *pi;
4101
4102 if ((uq = td->td_umtxq) == NULL)
4103 return;
4104
4105 mtx_lock(&umtx_lock);
4106 uq->uq_inherited_pri = PRI_MAX;
4107 while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
4108 pi->pi_owner = NULL;
4109 TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
4110 }
4111 mtx_unlock(&umtx_lock);
4112 thread_lock(td);
4113 sched_lend_user_prio(td, PRI_MAX);
4114 thread_unlock(td);
4115 }