1 /* $NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $ */
2
3 /*-
4 * Copyright (c) 2005 Emmanuel Dreyfus, all rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Emmanuel Dreyfus
17 * 4. The name of the author may not be used to endorse or promote
18 * products derived from this software without specific prior written
19 * permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE THE AUTHOR AND CONTRIBUTORS ``AS IS''
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
23 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 #if 0
37 __KERNEL_RCSID(1, "$NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $");
38 #endif
39
40 #include "opt_compat.h"
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/imgact.h>
45 #include <sys/kernel.h>
46 #include <sys/ktr.h>
47 #include <sys/lock.h>
48 #include <sys/malloc.h>
49 #include <sys/mutex.h>
50 #include <sys/priv.h>
51 #include <sys/proc.h>
52 #include <sys/queue.h>
53 #include <sys/sched.h>
54 #include <sys/sx.h>
55 #include <sys/umtx.h>
56
57 #ifdef COMPAT_LINUX32
58 #include <machine/../linux32/linux.h>
59 #include <machine/../linux32/linux32_proto.h>
60 #else
61 #include <machine/../linux/linux.h>
62 #include <machine/../linux/linux_proto.h>
63 #endif
64 #include <compat/linux/linux_emul.h>
65 #include <compat/linux/linux_futex.h>
66 #include <compat/linux/linux_util.h>
67
MALLOC_DEFINE(M_FUTEX, "futex", "Linux futexes");
MALLOC_DEFINE(M_FUTEX_WP, "futex wp", "Linux futexes wp");

struct futex;

/*
 * One thread blocked on a futex; linked on that futex's f_waiting_proc
 * list while asleep.
 */
struct waiting_proc {
	uint32_t wp_flags;		/* FUTEX_WP_* state bits */
	struct futex *wp_futex;		/* futex this waiter currently belongs to */
	TAILQ_ENTRY(waiting_proc) wp_list;
};

/*
 * In-kernel representation of a Linux futex, identified by a umtx_key
 * derived from the user address.  Reference-counted and kept on the
 * global futex_list.
 */
struct futex {
	struct sx f_lck;		/* serializes operations on this futex */
	uint32_t *f_uaddr;	/* user-supplied value, for debug */
	struct umtx_key f_key;		/* lookup identity for the user address */
	uint32_t f_refcount;		/* protected by futex_mtx */
	uint32_t f_bitset;		/* wakeup match bitset (WAIT/WAKE_BITSET) */
	LIST_ENTRY(futex) f_list;	/* linkage on global futex_list */
	TAILQ_HEAD(lf_waiting_proc, waiting_proc) f_waiting_proc;
};

struct futex_list futex_list;

/* Per-futex sleepable lock; SX_DUPOK because two futexes may be held at once. */
#define FUTEX_LOCK(f)		sx_xlock(&(f)->f_lck)
#define FUTEX_UNLOCK(f)		sx_xunlock(&(f)->f_lck)
#define FUTEX_INIT(f)		sx_init_flags(&(f)->f_lck, "ftlk", SX_DUPOK)
#define FUTEX_DESTROY(f)	sx_destroy(&(f)->f_lck)
#define FUTEX_ASSERT_LOCKED(f)	sx_assert(&(f)->f_lck, SA_XLOCKED)

struct mtx futex_mtx;	/* protects the futex list */
#define FUTEXES_LOCK		mtx_lock(&futex_mtx)
#define FUTEXES_UNLOCK		mtx_unlock(&futex_mtx)

/* flags for futex_get() */
#define FUTEX_CREATE_WP		0x1	/* create waiting_proc */
#define FUTEX_DONTCREATE	0x2	/* don't create futex if not exists */
#define FUTEX_DONTEXISTS	0x4	/* return EINVAL if futex exists */
#define FUTEX_SHARED		0x8	/* shared futex */

/* wp_flags */
#define FUTEX_WP_REQUEUED	0x1	/* wp requeued - wp moved from wp_list
					 * of futex where thread sleep to wp_list
					 * of another futex.
					 */
#define FUTEX_WP_REMOVED	0x2	/* wp is woken up and removed from futex
					 * wp_list to prevent double wakeup.
					 */

/* support.s -- atomic read-modify-write primitives on user words */
int futex_xchgl(int oparg, uint32_t *uaddr, int *oldval);
int futex_addl(int oparg, uint32_t *uaddr, int *oldval);
int futex_orl(int oparg, uint32_t *uaddr, int *oldval);
int futex_andl(int oparg, uint32_t *uaddr, int *oldval);
int futex_xorl(int oparg, uint32_t *uaddr, int *oldval);
122
/*
 * Drop one reference on futex f, optionally disposing of waiter wp.
 * Called with f locked; f's lock is always released on return.  When
 * the last reference is dropped the futex is unhashed and destroyed.
 */
static void
futex_put(struct futex *f, struct waiting_proc *wp)
{

	FUTEX_ASSERT_LOCKED(f);
	if (wp != NULL) {
		/*
		 * A waiter flagged FUTEX_WP_REMOVED was already unlinked
		 * by its waker; unlink it here otherwise.
		 */
		if ((wp->wp_flags & FUTEX_WP_REMOVED) == 0)
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
		free(wp, M_FUTEX_WP);
	}

	FUTEXES_LOCK;
	if (--f->f_refcount == 0) {
		/* Last reference: remove from the global list and destroy. */
		LIST_REMOVE(f, f_list);
		FUTEXES_UNLOCK;
		FUTEX_UNLOCK(f);

		LINUX_CTR3(sys_futex, "futex_put destroy uaddr %p ref %d "
		    "shared %d", f->f_uaddr, f->f_refcount, f->f_key.shared);
		umtx_key_release(&f->f_key);
		FUTEX_DESTROY(f);
		free(f, M_FUTEX);
		return;
	}

	LINUX_CTR3(sys_futex, "futex_put uaddr %p ref %d shared %d",
	    f->f_uaddr, f->f_refcount, f->f_key.shared);
	FUTEXES_UNLOCK;
	FUTEX_UNLOCK(f);
}
153
/*
 * Look up (and, unless FUTEX_DONTCREATE, create) the in-kernel futex
 * for user address uaddr.  On success a locked, referenced futex is
 * returned in *newf; with FUTEX_DONTCREATE, *newf may be NULL if no
 * futex exists.  FUTEX_DONTEXISTS makes an existing futex an EINVAL
 * error.  Sharing mode is chosen by FUTEX_SHARED.
 */
static int
futex_get0(uint32_t *uaddr, struct futex **newf, uint32_t flags)
{
	struct futex *f, *tmpf;
	struct umtx_key key;
	int error;

	*newf = tmpf = NULL;

	error = umtx_key_get(uaddr, TYPE_FUTEX, (flags & FUTEX_SHARED) ?
	    AUTO_SHARE : THREAD_SHARE, &key);
	if (error)
		return (error);
retry:
	FUTEXES_LOCK;
	LIST_FOREACH(f, &futex_list, f_list) {
		if (umtx_key_match(&f->f_key, &key)) {
			/*
			 * Raced with another creator while we allocated:
			 * discard our preallocated futex.
			 */
			if (tmpf != NULL) {
				FUTEX_UNLOCK(tmpf);
				FUTEX_DESTROY(tmpf);
				free(tmpf, M_FUTEX);
			}
			if (flags & FUTEX_DONTEXISTS) {
				FUTEXES_UNLOCK;
				umtx_key_release(&key);
				return (EINVAL);
			}

			/*
			 * Increment refcount of the found futex to
			 * prevent it from deallocation before FUTEX_LOCK()
			 */
			++f->f_refcount;
			FUTEXES_UNLOCK;
			umtx_key_release(&key);

			FUTEX_LOCK(f);
			*newf = f;
			LINUX_CTR3(sys_futex, "futex_get uaddr %p ref %d shared %d",
			    uaddr, f->f_refcount, f->f_key.shared);
			return (0);
		}
	}

	if (flags & FUTEX_DONTCREATE) {
		FUTEXES_UNLOCK;
		umtx_key_release(&key);
		LINUX_CTR1(sys_futex, "futex_get uaddr %p null", uaddr);
		return (0);
	}

	if (tmpf == NULL) {
		/*
		 * Drop the list mutex for the (sleepable) allocation,
		 * then retry the lookup from scratch.
		 */
		FUTEXES_UNLOCK;
		tmpf = malloc(sizeof(*tmpf), M_FUTEX, M_WAITOK | M_ZERO);
		tmpf->f_uaddr = uaddr;
		tmpf->f_key = key;
		tmpf->f_refcount = 1;
		tmpf->f_bitset = FUTEX_BITSET_MATCH_ANY;
		FUTEX_INIT(tmpf);
		TAILQ_INIT(&tmpf->f_waiting_proc);

		/*
		 * Lock the new futex before an insert into the futex_list
		 * to prevent futex usage by other.
		 */
		FUTEX_LOCK(tmpf);
		goto retry;
	}

	LIST_INSERT_HEAD(&futex_list, tmpf, f_list);
	FUTEXES_UNLOCK;

	LINUX_CTR3(sys_futex, "futex_get uaddr %p ref %d shared %d new",
	    uaddr, tmpf->f_refcount, tmpf->f_key.shared);
	*newf = tmpf;
	return (0);
}
231
232 static int
233 futex_get(uint32_t *uaddr, struct waiting_proc **wp, struct futex **f,
234 uint32_t flags)
235 {
236 int error;
237
238 if (flags & FUTEX_CREATE_WP) {
239 *wp = malloc(sizeof(struct waiting_proc), M_FUTEX_WP, M_WAITOK);
240 (*wp)->wp_flags = 0;
241 }
242 error = futex_get0(uaddr, f, flags);
243 if (error) {
244 if (flags & FUTEX_CREATE_WP)
245 free(*wp, M_FUTEX_WP);
246 return (error);
247 }
248 if (flags & FUTEX_CREATE_WP) {
249 TAILQ_INSERT_HEAD(&(*f)->f_waiting_proc, *wp, wp_list);
250 (*wp)->wp_futex = *f;
251 }
252
253 return (error);
254 }
255
/*
 * Block on waiter wp until woken or timeout (in ticks; 0 = forever).
 * Called with f locked.  Always consumes the caller's reference on the
 * futex the waiter ends up on and frees wp via futex_put().  If the
 * waiter was requeued while asleep, the original futex's reference is
 * dropped and the reference futex_requeue() took on the target futex
 * is released instead.
 */
static int
futex_sleep(struct futex *f, struct waiting_proc *wp, unsigned long timeout)
{
	int error;

	FUTEX_ASSERT_LOCKED(f);
	LINUX_CTR4(sys_futex, "futex_sleep enter uaddr %p wp %p timo %ld ref %d",
	    f->f_uaddr, wp, timeout, f->f_refcount);
	/* sx_sleep() atomically drops and reacquires f_lck around the sleep. */
	error = sx_sleep(wp, &f->f_lck, PCATCH, "futex", timeout);
	if (wp->wp_flags & FUTEX_WP_REQUEUED) {
		KASSERT(f != wp->wp_futex, ("futex != wp_futex"));
		LINUX_CTR5(sys_futex, "futex_sleep out error %d uaddr %p w"
		    " %p requeued uaddr %p ref %d",
		    error, f->f_uaddr, wp, wp->wp_futex->f_uaddr,
		    wp->wp_futex->f_refcount);
		futex_put(f, NULL);
		/* Continue with the futex we were moved onto. */
		f = wp->wp_futex;
		FUTEX_LOCK(f);
	} else
		LINUX_CTR3(sys_futex, "futex_sleep out error %d uaddr %p wp %p",
		    error, f->f_uaddr, wp);

	futex_put(f, wp);
	return (error);
}
281
/*
 * Wake up to n waiters on f whose wakeup bitset intersects 'bitset'.
 * Returns the number of waiters woken, or EINVAL for an empty bitset.
 * Called with f locked.
 */
static int
futex_wake(struct futex *f, int n, uint32_t bitset)
{
	struct waiting_proc *wp, *wpt;
	int count = 0;

	if (bitset == 0)
		return (EINVAL);

	FUTEX_ASSERT_LOCKED(f);
	TAILQ_FOREACH_SAFE(wp, &f->f_waiting_proc, wp_list, wpt) {
		LINUX_CTR3(sys_futex, "futex_wake uaddr %p wp %p ref %d",
		    f->f_uaddr, wp, f->f_refcount);
		/*
		 * Unless we find a matching bit in
		 * the bitset, continue searching.
		 * NOTE(review): this reads the bitset through wp->wp_futex
		 * rather than f directly; wp is on f's list so these are
		 * presumably the same futex -- confirm.
		 */
		if (!(wp->wp_futex->f_bitset & bitset))
			continue;

		/* Flag removal so futex_put() won't unlink wp a second time. */
		wp->wp_flags |= FUTEX_WP_REMOVED;
		TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
		wakeup_one(wp);
		if (++count == n)
			break;
	}

	return (count);
}
311
/*
 * Wake up to n waiters on f and requeue up to n2 further waiters onto
 * f2's wait list.  Both futexes must be locked by the caller.  Each
 * requeued waiter takes a reference on f2, released by the waiter
 * itself in futex_sleep().  Returns the number of waiters processed
 * (woken plus requeued).
 */
static int
futex_requeue(struct futex *f, int n, struct futex *f2, int n2)
{
	struct waiting_proc *wp, *wpt;
	int count = 0;

	FUTEX_ASSERT_LOCKED(f);
	FUTEX_ASSERT_LOCKED(f2);

	TAILQ_FOREACH_SAFE(wp, &f->f_waiting_proc, wp_list, wpt) {
		if (++count <= n) {
			/* The first n waiters are simply woken. */
			LINUX_CTR2(sys_futex, "futex_req_wake uaddr %p wp %p",
			    f->f_uaddr, wp);
			wp->wp_flags |= FUTEX_WP_REMOVED;
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
			wakeup_one(wp);
		} else {
			LINUX_CTR3(sys_futex, "futex_requeue uaddr %p wp %p to %p",
			    f->f_uaddr, wp, f2->f_uaddr);
			wp->wp_flags |= FUTEX_WP_REQUEUED;
			/* Move wp to wp_list of f2 futex */
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
			TAILQ_INSERT_HEAD(&f2->f_waiting_proc, wp, wp_list);

			/*
			 * Thread which sleeps on wp after waking should
			 * acquire f2 lock, so increment refcount of f2 to
			 * prevent it from premature deallocation.
			 */
			wp->wp_futex = f2;
			FUTEXES_LOCK;
			++f2->f_refcount;
			FUTEXES_UNLOCK;
			/* Stop once n2 waiters have been requeued. */
			if (count - n >= n2)
				break;
		}
	}

	return (count);
}
352
353 static int
354 futex_wait(struct futex *f, struct waiting_proc *wp, struct l_timespec *ts,
355 uint32_t bitset)
356 {
357 struct l_timespec timeout = {0, 0};
358 struct timeval tv = {0, 0};
359 int timeout_hz;
360 int error;
361
362 if (bitset == 0)
363 return (EINVAL);
364 f->f_bitset = bitset;
365
366 if (ts != NULL) {
367 error = copyin(ts, &timeout, sizeof(timeout));
368 if (error)
369 return (error);
370 }
371
372 tv.tv_usec = timeout.tv_sec * 1000000 + timeout.tv_nsec / 1000;
373 timeout_hz = tvtohz(&tv);
374
375 if (timeout.tv_sec == 0 && timeout.tv_nsec == 0)
376 timeout_hz = 0;
377
378 /*
379 * If the user process requests a non null timeout,
380 * make sure we do not turn it into an infinite
381 * timeout because timeout_hz gets null.
382 *
383 * We use a minimal timeout of 1/hz. Maybe it would
384 * make sense to just return ETIMEDOUT without sleeping.
385 */
386 if (((timeout.tv_sec != 0) || (timeout.tv_nsec != 0)) &&
387 (timeout_hz == 0))
388 timeout_hz = 1;
389
390 error = futex_sleep(f, wp, timeout_hz);
391 if (error == EWOULDBLOCK)
392 error = ETIMEDOUT;
393
394 return (error);
395 }
396
397 static int
398 futex_atomic_op(struct thread *td, int encoded_op, uint32_t *uaddr)
399 {
400 int op = (encoded_op >> 28) & 7;
401 int cmp = (encoded_op >> 24) & 15;
402 int oparg = (encoded_op << 8) >> 20;
403 int cmparg = (encoded_op << 20) >> 20;
404 int oldval = 0, ret;
405
406 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
407 oparg = 1 << oparg;
408
409 #ifdef DEBUG
410 if (ldebug(sys_futex))
411 printf("futex_atomic_op: op = %d, cmp = %d, oparg = %x, "
412 "cmparg = %x, uaddr = %p\n",
413 op, cmp, oparg, cmparg, uaddr);
414 #endif
415 /* XXX: Linux verifies access here and returns EFAULT */
416
417 switch (op) {
418 case FUTEX_OP_SET:
419 ret = futex_xchgl(oparg, uaddr, &oldval);
420 break;
421 case FUTEX_OP_ADD:
422 ret = futex_addl(oparg, uaddr, &oldval);
423 break;
424 case FUTEX_OP_OR:
425 ret = futex_orl(oparg, uaddr, &oldval);
426 break;
427 case FUTEX_OP_ANDN:
428 ret = futex_andl(~oparg, uaddr, &oldval);
429 break;
430 case FUTEX_OP_XOR:
431 ret = futex_xorl(oparg, uaddr, &oldval);
432 break;
433 default:
434 ret = -ENOSYS;
435 break;
436 }
437
438 if (ret)
439 return (ret);
440
441 switch (cmp) {
442 case FUTEX_OP_CMP_EQ:
443 return (oldval == cmparg);
444 case FUTEX_OP_CMP_NE:
445 return (oldval != cmparg);
446 case FUTEX_OP_CMP_LT:
447 return (oldval < cmparg);
448 case FUTEX_OP_CMP_GE:
449 return (oldval >= cmparg);
450 case FUTEX_OP_CMP_LE:
451 return (oldval <= cmparg);
452 case FUTEX_OP_CMP_GT:
453 return (oldval > cmparg);
454 default:
455 return (-ENOSYS);
456 }
457 }
458
/*
 * futex(2) system call entry point.  Strips the PRIVATE and
 * CLOCK_REALTIME flag bits from args->op and dispatches to the
 * per-operation handlers above.  PI operations and FUTEX_REQUEUE are
 * not implemented.
 */
int
linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
{
	int clockrt, nrwake, op_ret, ret, val;
	struct linux_emuldata *em;
	struct waiting_proc *wp;
	struct futex *f, *f2;
	int error;
	uint32_t flags;

	/* Private futexes are per-process; others may be shared via mmap. */
	if (args->op & LINUX_FUTEX_PRIVATE_FLAG) {
		flags = 0;
		args->op &= ~LINUX_FUTEX_PRIVATE_FLAG;
	} else
		flags = FUTEX_SHARED;

	/*
	 * Currently support for switching between CLOCK_MONOTONIC and
	 * CLOCK_REALTIME is not present. However Linux forbids the use of
	 * FUTEX_CLOCK_REALTIME with any op except FUTEX_WAIT_BITSET and
	 * FUTEX_WAIT_REQUEUE_PI.
	 */
	clockrt = args->op & LINUX_FUTEX_CLOCK_REALTIME;
	args->op = args->op & ~LINUX_FUTEX_CLOCK_REALTIME;
	if (clockrt && args->op != LINUX_FUTEX_WAIT_BITSET &&
	    args->op != LINUX_FUTEX_WAIT_REQUEUE_PI)
		return (ENOSYS);

	error = 0;
	f = f2 = NULL;

	switch (args->op) {
	case LINUX_FUTEX_WAIT:
		/* Plain WAIT is WAIT_BITSET with an all-ones bitset. */
		args->val3 = FUTEX_BITSET_MATCH_ANY;
		/* FALLTHROUGH */

	case LINUX_FUTEX_WAIT_BITSET:

		LINUX_CTR3(sys_futex, "WAIT uaddr %p val %d val3 %d",
		    args->uaddr, args->val, args->val3);
#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex,
			    "futex_wait uaddr %p val %d val3 %d"),
			    args->uaddr, args->val, args->val3);
#endif
		error = futex_get(args->uaddr, &wp, &f,
		    flags | FUTEX_CREATE_WP);
		if (error)
			return (error);
		error = copyin(args->uaddr, &val, sizeof(val));
		if (error) {
			LINUX_CTR1(sys_futex, "WAIT copyin failed %d",
			    error);
			futex_put(f, wp);
			return (error);
		}
		/*
		 * Only sleep if the user word still holds the expected
		 * value; otherwise the wakeup already happened.
		 */
		if (val != args->val) {
			LINUX_CTR4(sys_futex,
			    "WAIT uaddr %p val %d != uval %d val3 %d",
			    args->uaddr, args->val, val, args->val3);
			futex_put(f, wp);
			return (EWOULDBLOCK);
		}

		error = futex_wait(f, wp, args->timeout, args->val3);
		break;

	case LINUX_FUTEX_WAKE:
		/* Plain WAKE is WAKE_BITSET with an all-ones bitset. */
		args->val3 = FUTEX_BITSET_MATCH_ANY;
		/* FALLTHROUGH */

	case LINUX_FUTEX_WAKE_BITSET:

		LINUX_CTR3(sys_futex, "WAKE uaddr %p val % d val3 %d",
		    args->uaddr, args->val, args->val3);

#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_wake uaddr %p val %d val3 %d"),
			    args->uaddr, args->val, args->val3);
#endif
		error = futex_get(args->uaddr, NULL, &f,
		    flags | FUTEX_DONTCREATE);
		if (error)
			return (error);
		/* No futex means no waiters: nothing woken. */
		if (f == NULL) {
			td->td_retval[0] = 0;
			return (error);
		}
		td->td_retval[0] = futex_wake(f, args->val, args->val3);
		futex_put(f, NULL);
		break;

	case LINUX_FUTEX_CMP_REQUEUE:

		LINUX_CTR5(sys_futex, "CMP_REQUEUE uaddr %p "
		    "val %d val3 %d uaddr2 %p val2 %d",
		    args->uaddr, args->val, args->val3, args->uaddr2,
		    (int)(unsigned long)args->timeout);

#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_cmp_requeue uaddr %p "
			    "val %d val3 %d uaddr2 %p val2 %d"),
			    args->uaddr, args->val, args->val3, args->uaddr2,
			    (int)(unsigned long)args->timeout);
#endif

		/*
		 * Linux allows this, we would not, it is an incorrect
		 * usage of declared ABI, so return EINVAL.
		 */
		if (args->uaddr == args->uaddr2)
			return (EINVAL);
		error = futex_get(args->uaddr, NULL, &f, flags);
		if (error)
			return (error);

		/*
		 * To avoid deadlocks return EINVAL if second futex
		 * exists at this time.
		 *
		 * Glibc fall back to FUTEX_WAKE in case of any error
		 * returned by FUTEX_CMP_REQUEUE.
		 */
		error = futex_get(args->uaddr2, NULL, &f2,
		    flags | FUTEX_DONTEXISTS);
		if (error) {
			futex_put(f, NULL);
			return (error);
		}
		error = copyin(args->uaddr, &val, sizeof(val));
		if (error) {
			LINUX_CTR1(sys_futex, "CMP_REQUEUE copyin failed %d",
			    error);
			futex_put(f2, NULL);
			futex_put(f, NULL);
			return (error);
		}
		/* Requeue only if the user word still holds val3. */
		if (val != args->val3) {
			LINUX_CTR2(sys_futex, "CMP_REQUEUE val %d != uval %d",
			    args->val, val);
			futex_put(f2, NULL);
			futex_put(f, NULL);
			return (EAGAIN);
		}

		/* For CMP_REQUEUE the timeout slot carries the requeue count. */
		nrwake = (int)(unsigned long)args->timeout;
		td->td_retval[0] = futex_requeue(f, args->val, f2, nrwake);
		futex_put(f2, NULL);
		futex_put(f, NULL);
		break;

	case LINUX_FUTEX_WAKE_OP:

		LINUX_CTR5(sys_futex, "WAKE_OP "
		    "uaddr %p op %d val %x uaddr2 %p val3 %x",
		    args->uaddr, args->op, args->val,
		    args->uaddr2, args->val3);

#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_wake_op "
			    "uaddr %p op %d val %x uaddr2 %p val3 %x"),
			    args->uaddr, args->op, args->val,
			    args->uaddr2, args->val3);
#endif
		error = futex_get(args->uaddr, NULL, &f, flags);
		if (error)
			return (error);
		/* f2 stays NULL when both addresses name the same futex. */
		if (args->uaddr != args->uaddr2)
			error = futex_get(args->uaddr2, NULL, &f2, flags);
		if (error) {
			futex_put(f, NULL);
			return (error);
		}

		/*
		 * This function returns positive number as results and
		 * negative as errors
		 */
		op_ret = futex_atomic_op(td, args->val3, args->uaddr2);

		if (op_ret < 0) {
			/* XXX: We don't handle the EFAULT yet. */
			if (op_ret != -EFAULT) {
				if (f2 != NULL)
					futex_put(f2, NULL);
				futex_put(f, NULL);
				return (-op_ret);
			}
			if (f2 != NULL)
				futex_put(f2, NULL);
			futex_put(f, NULL);
			return (EFAULT);
		}

		ret = futex_wake(f, args->val, args->val3);

		/* A true comparison also wakes waiters on the second futex. */
		if (op_ret > 0) {
			op_ret = 0;
			/* For WAKE_OP the timeout slot carries the second count. */
			nrwake = (int)(unsigned long)args->timeout;

			if (f2 != NULL)
				op_ret += futex_wake(f2, nrwake, args->val3);
			else
				op_ret += futex_wake(f, nrwake, args->val3);
			ret += op_ret;

		}
		if (f2 != NULL)
			futex_put(f2, NULL);
		futex_put(f, NULL);
		td->td_retval[0] = ret;
		break;

	case LINUX_FUTEX_LOCK_PI:
		/* not yet implemented */
		linux_msg(td,
		    "linux_sys_futex: "
		    "op LINUX_FUTEX_LOCK_PI not implemented\n");
		return (ENOSYS);

	case LINUX_FUTEX_UNLOCK_PI:
		/* not yet implemented */
		linux_msg(td,
		    "linux_sys_futex: "
		    "op LINUX_FUTEX_UNLOCK_PI not implemented\n");
		return (ENOSYS);

	case LINUX_FUTEX_TRYLOCK_PI:
		/* not yet implemented */
		linux_msg(td,
		    "linux_sys_futex: "
		    "op LINUX_FUTEX_TRYLOCK_PI not implemented\n");
		return (ENOSYS);

	case LINUX_FUTEX_REQUEUE:

		/*
		 * Glibc does not use this operation since version 2.3.3,
		 * as it is racy and replaced by FUTEX_CMP_REQUEUE operation.
		 * Glibc versions prior to 2.3.3 fall back to FUTEX_WAKE when
		 * FUTEX_REQUEUE returned EINVAL.
		 */
		em = em_find(td->td_proc, EMUL_DONTLOCK);
		/* Warn only once per process about the deprecated op. */
		if ((em->flags & LINUX_XDEPR_REQUEUEOP) == 0) {
			linux_msg(td,
			    "linux_sys_futex: "
			    "unsupported futex_requeue op\n");
			em->flags |= LINUX_XDEPR_REQUEUEOP;
		}
		return (EINVAL);

	case LINUX_FUTEX_WAIT_REQUEUE_PI:
		/* not yet implemented */
		linux_msg(td,
		    "linux_sys_futex: "
		    "op FUTEX_WAIT_REQUEUE_PI not implemented\n");
		return (ENOSYS);

	case LINUX_FUTEX_CMP_REQUEUE_PI:
		/* not yet implemented */
		linux_msg(td,
		    "linux_sys_futex: "
		    "op LINUX_FUTEX_CMP_REQUEUE_PI not implemented\n");
		return (ENOSYS);

	default:
		linux_msg(td,
		    "linux_sys_futex: unknown op %d\n", args->op);
		return (ENOSYS);
	}

	return (error);
}
736
/*
 * set_robust_list(2): record the userspace robust-futex list head in
 * the process's Linux emulation data.  The stored pointer is only
 * dereferenced at exit time by release_futexes().
 */
int
linux_set_robust_list(struct thread *td, struct linux_set_robust_list_args *args)
{
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(set_robust_list))
		printf(ARGS(set_robust_list, "head %p len %d"),
		    args->head, args->len);
#endif

	/* Linux accepts only the exact size of the list head structure. */
	if (args->len != sizeof(struct linux_robust_list_head))
		return (EINVAL);

	/* em_find() with EMUL_DOLOCK returns with emul_lock held. */
	em = em_find(td->td_proc, EMUL_DOLOCK);
	em->robust_futexes = args->head;
	EMUL_UNLOCK(&emul_lock);

	return (0);
}
757
758 int
759 linux_get_robust_list(struct thread *td, struct linux_get_robust_list_args *args)
760 {
761 struct linux_emuldata *em;
762 struct linux_robust_list_head *head;
763 l_size_t len = sizeof(struct linux_robust_list_head);
764 int error = 0;
765
766 #ifdef DEBUG
767 if (ldebug(get_robust_list))
768 printf(ARGS(get_robust_list, ""));
769 #endif
770
771 if (!args->pid) {
772 em = em_find(td->td_proc, EMUL_DONTLOCK);
773 head = em->robust_futexes;
774 } else {
775 struct proc *p;
776
777 p = pfind(args->pid);
778 if (p == NULL)
779 return (ESRCH);
780
781 em = em_find(p, EMUL_DONTLOCK);
782 /* XXX: ptrace? */
783 if (priv_check(td, PRIV_CRED_SETUID) ||
784 priv_check(td, PRIV_CRED_SETEUID) ||
785 p_candebug(td, p)) {
786 PROC_UNLOCK(p);
787 return (EPERM);
788 }
789 head = em->robust_futexes;
790
791 PROC_UNLOCK(p);
792 }
793
794 error = copyout(&len, args->len, sizeof(l_size_t));
795 if (error)
796 return (EFAULT);
797
798 error = copyout(head, args->head, sizeof(struct linux_robust_list_head));
799
800 return (error);
801 }
802
/*
 * Handle one robust futex of an exiting process: if the dying process
 * owns the futex word (its TID field matches p_pid), set
 * FUTEX_OWNER_DIED (preserving FUTEX_WAITERS) and wake one waiter so
 * it can observe the dead owner.
 */
static int
handle_futex_death(struct proc *p, uint32_t *uaddr, int pi)
{
	uint32_t uval, nval, mval;
	struct futex *f;
	int error;

retry:
	if (copyin(uaddr, &uval, 4))
		return (EFAULT);
	if ((uval & FUTEX_TID_MASK) == p->p_pid) {
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		nval = casuword32(uaddr, uval, mval);

		/*
		 * NOTE(review): -1 from casuword32 signals a fault, but it
		 * is indistinguishable from a legitimate 0xffffffff old
		 * value -- confirm this is acceptable here.
		 */
		if (nval == -1)
			return (EFAULT);

		/* Lost a race with a concurrent update; reread and retry. */
		if (nval != uval)
			goto retry;

		/* Non-PI futexes with waiters get one waiter woken. */
		if (!pi && (uval & FUTEX_WAITERS)) {
			error = futex_get(uaddr, NULL, &f,
			    FUTEX_DONTCREATE | FUTEX_SHARED);
			if (error)
				return (error);
			if (f != NULL) {
				futex_wake(f, 1, FUTEX_BITSET_MATCH_ANY);
				futex_put(f, NULL);
			}
		}
	}

	return (0);
}
837
838 static int
839 fetch_robust_entry(struct linux_robust_list **entry,
840 struct linux_robust_list **head, int *pi)
841 {
842 l_ulong uentry;
843
844 if (copyin((const void *)head, &uentry, sizeof(l_ulong)))
845 return (EFAULT);
846
847 *entry = (void *)(uentry & ~1UL);
848 *pi = uentry & 1;
849
850 return (0);
851 }
852
853 /* This walks the list of robust futexes releasing them. */
854 void
855 release_futexes(struct proc *p)
856 {
857 struct linux_robust_list_head *head = NULL;
858 struct linux_robust_list *entry, *next_entry, *pending;
859 unsigned int limit = 2048, pi, next_pi, pip;
860 struct linux_emuldata *em;
861 l_long futex_offset;
862 int rc;
863
864 em = em_find(p, EMUL_DONTLOCK);
865 head = em->robust_futexes;
866
867 if (head == NULL)
868 return;
869
870 if (fetch_robust_entry(&entry, PTRIN(&head->list.next), &pi))
871 return;
872
873 if (copyin(&head->futex_offset, &futex_offset, sizeof(futex_offset)))
874 return;
875
876 if (fetch_robust_entry(&pending, PTRIN(&head->pending_list), &pip))
877 return;
878
879 while (entry != &head->list) {
880 rc = fetch_robust_entry(&next_entry, PTRIN(&entry->next), &next_pi);
881
882 if (entry != pending)
883 if (handle_futex_death(p, (uint32_t *)entry + futex_offset, pi))
884 return;
885 if (rc)
886 return;
887
888 entry = next_entry;
889 pi = next_pi;
890
891 if (!--limit)
892 break;
893
894 sched_relinquish(curthread);
895 }
896
897 if (pending)
898 handle_futex_death(p, (uint32_t *)pending + futex_offset, pip);
899 }
Cache object: 82b24a73422d899eec62412b9687be07
|