1 /* $NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $ */
2
3 /*-
4 * Copyright (c) 2005 Emmanuel Dreyfus, all rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Emmanuel Dreyfus
17 * 4. The name of the author may not be used to endorse or promote
18 * products derived from this software without specific prior written
19 * permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE THE AUTHOR AND CONTRIBUTORS ``AS IS''
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
23 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD: releng/8.2/sys/compat/linux/linux_futex.c 215857 2010-11-26 11:02:51Z netchild $");
36 #if 0
37 __KERNEL_RCSID(1, "$NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $");
38 #endif
39
40 #include "opt_compat.h"
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/imgact.h>
45 #include <sys/kernel.h>
46 #include <sys/ktr.h>
47 #include <sys/lock.h>
48 #include <sys/malloc.h>
49 #include <sys/mutex.h>
50 #include <sys/priv.h>
51 #include <sys/proc.h>
52 #include <sys/queue.h>
53 #include <sys/sched.h>
54 #include <sys/sx.h>
55
56 #ifdef COMPAT_LINUX32
57 #include <machine/../linux32/linux.h>
58 #include <machine/../linux32/linux32_proto.h>
59 #else
60 #include <machine/../linux/linux.h>
61 #include <machine/../linux/linux_proto.h>
62 #endif
63 #include <compat/linux/linux_futex.h>
64 #include <compat/linux/linux_emul.h>
65 #include <compat/linux/linux_util.h>
66
MALLOC_DEFINE(M_FUTEX, "futex", "Linux futexes");
MALLOC_DEFINE(M_FUTEX_WP, "futex wp", "Linux futexes wp");

struct futex;

/*
 * One sleeping thread on a futex.  Allocated by futex_get() with
 * FUTEX_CREATE_WP, queued on the futex's f_waiting_proc list, and
 * freed by futex_put().
 */
struct waiting_proc {
	uint32_t wp_flags;		/* FUTEX_WP_* state bits below */
	struct futex *wp_futex;		/* futex this waiter sleeps on; updated
					 * by futex_requeue() */
	TAILQ_ENTRY(waiting_proc) wp_list;
};

/*
 * Kernel representation of a futex, keyed by the user-space address
 * of the futex word.  Reference-counted; destroyed when the last
 * reference is dropped in futex_put().
 */
struct futex {
	struct sx f_lck;		/* serializes wait/wake on this futex */
	uint32_t *f_uaddr;		/* user-space address of futex word */
	uint32_t f_refcount;		/* protected by futex_mtx */
	LIST_ENTRY(futex) f_list;	/* linkage on global futex_list */
	TAILQ_HEAD(lf_waiting_proc, waiting_proc) f_waiting_proc;
};

/* Global list of all active futexes, protected by futex_mtx. */
struct futex_list futex_list;

#define FUTEX_LOCK(f) sx_xlock(&(f)->f_lck)
#define FUTEX_UNLOCK(f) sx_xunlock(&(f)->f_lck)
#define FUTEX_INIT(f) sx_init_flags(&(f)->f_lck, "ftlk", 0)
#define FUTEX_DESTROY(f) sx_destroy(&(f)->f_lck)
#define FUTEX_ASSERT_LOCKED(f) sx_assert(&(f)->f_lck, SA_XLOCKED)

struct mtx futex_mtx; /* protects the futex list */
#define FUTEXES_LOCK mtx_lock(&futex_mtx)
#define FUTEXES_UNLOCK mtx_unlock(&futex_mtx)

/* flags for futex_get() */
#define FUTEX_CREATE_WP 0x1 /* create waiting_proc */
#define FUTEX_DONTCREATE 0x2 /* don't create futex if not exists */
#define FUTEX_DONTEXISTS 0x4 /* return EINVAL if futex exists */

/* wp_flags */
#define FUTEX_WP_REQUEUED 0x1 /* wp requeued - wp moved from wp_list
				 * of futex where thread sleep to wp_list
				 * of another futex.
				 */
#define FUTEX_WP_REMOVED 0x2 /* wp is woken up and removed from futex
				 * wp_list to prevent double wakeup.
				 */

/*
 * Atomic operations on the user futex word, implemented in support.s.
 * Each applies its operation to *uaddr, stores the previous value in
 * *oldval, and (presumably) returns nonzero on fault — callers treat a
 * nonzero return as an error; confirm against the assembly.
 */
int futex_xchgl(int oparg, uint32_t *uaddr, int *oldval);
int futex_addl(int oparg, uint32_t *uaddr, int *oldval);
int futex_orl(int oparg, uint32_t *uaddr, int *oldval);
int futex_andl(int oparg, uint32_t *uaddr, int *oldval);
int futex_xorl(int oparg, uint32_t *uaddr, int *oldval);
118
/*
 * Release one reference on futex f, which must be sx-locked by the
 * caller, and optionally dispose of waiter wp.  The futex lock is
 * always dropped on return; when the refcount hits zero the futex is
 * also unhooked from futex_list, destroyed and freed.
 */
static void
futex_put(struct futex *f, struct waiting_proc *wp)
{

	FUTEX_ASSERT_LOCKED(f);
	if (wp != NULL) {
		/*
		 * Unlink wp unless futex_wake()/futex_requeue() already
		 * removed it (FUTEX_WP_REMOVED prevents a double remove).
		 */
		if ((wp->wp_flags & FUTEX_WP_REMOVED) == 0)
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
		free(wp, M_FUTEX_WP);
	}

	FUTEXES_LOCK;
	if (--f->f_refcount == 0) {
		/* Last reference: remove from the global list and destroy. */
		LIST_REMOVE(f, f_list);
		FUTEXES_UNLOCK;
		FUTEX_UNLOCK(f);

		LINUX_CTR2(sys_futex, "futex_put destroy uaddr %p ref %d",
		    f->f_uaddr, f->f_refcount);
		FUTEX_DESTROY(f);
		free(f, M_FUTEX);
		return;
	}

	LINUX_CTR2(sys_futex, "futex_put uaddr %p ref %d",
	    f->f_uaddr, f->f_refcount);
	FUTEXES_UNLOCK;
	FUTEX_UNLOCK(f);
}
148
/*
 * Look up (or create) the futex for user address uaddr and return it
 * locked with an extra reference in *newf.  With FUTEX_DONTCREATE a
 * missing futex yields *newf == NULL and success; with FUTEX_DONTEXISTS
 * an existing futex yields EINVAL.
 *
 * Because a new futex cannot be allocated while holding futex_mtx
 * (malloc may sleep), allocation drops the mutex and retries the
 * lookup, discarding the preallocated futex if somebody else raced
 * us and inserted one first.
 */
static int
futex_get0(uint32_t *uaddr, struct futex **newf, uint32_t flags)
{
	struct futex *f, *tmpf;

	*newf = tmpf = NULL;

retry:
	FUTEXES_LOCK;
	LIST_FOREACH(f, &futex_list, f_list) {
		if (f->f_uaddr == uaddr) {
			/* Lost the race: drop our preallocated futex. */
			if (tmpf != NULL) {
				FUTEX_UNLOCK(tmpf);
				FUTEX_DESTROY(tmpf);
				free(tmpf, M_FUTEX);
			}
			if (flags & FUTEX_DONTEXISTS) {
				FUTEXES_UNLOCK;
				return (EINVAL);
			}

			/*
			 * Increment refcount of the found futex to
			 * prevent it from deallocation before FUTEX_LOCK()
			 */
			++f->f_refcount;
			FUTEXES_UNLOCK;

			FUTEX_LOCK(f);
			*newf = f;
			LINUX_CTR2(sys_futex, "futex_get uaddr %p ref %d",
			    uaddr, f->f_refcount);
			return (0);
		}
	}

	if (flags & FUTEX_DONTCREATE) {
		FUTEXES_UNLOCK;
		LINUX_CTR1(sys_futex, "futex_get uaddr %p null", uaddr);
		return (0);
	}

	if (tmpf == NULL) {
		FUTEXES_UNLOCK;
		/* M_WAITOK may sleep, hence allocating outside futex_mtx. */
		tmpf = malloc(sizeof(*tmpf), M_FUTEX, M_WAITOK | M_ZERO);
		tmpf->f_uaddr = uaddr;
		tmpf->f_refcount = 1;
		FUTEX_INIT(tmpf);
		TAILQ_INIT(&tmpf->f_waiting_proc);

		/*
		 * Lock the new futex before an insert into the futex_list
		 * to prevent futex usage by other.
		 */
		FUTEX_LOCK(tmpf);
		goto retry;
	}

	LIST_INSERT_HEAD(&futex_list, tmpf, f_list);
	FUTEXES_UNLOCK;

	LINUX_CTR2(sys_futex, "futex_get uaddr %p ref %d new",
	    uaddr, tmpf->f_refcount);
	*newf = tmpf;
	return (0);
}
215
216 static int
217 futex_get(uint32_t *uaddr, struct waiting_proc **wp, struct futex **f,
218 uint32_t flags)
219 {
220 int error;
221
222 if (flags & FUTEX_CREATE_WP) {
223 *wp = malloc(sizeof(struct waiting_proc), M_FUTEX_WP, M_WAITOK);
224 (*wp)->wp_flags = 0;
225 }
226 error = futex_get0(uaddr, f, flags);
227 if (error) {
228 if (flags & FUTEX_CREATE_WP)
229 free(*wp, M_FUTEX_WP);
230 return (error);
231 }
232 if (flags & FUTEX_CREATE_WP) {
233 TAILQ_INSERT_HEAD(&(*f)->f_waiting_proc, *wp, wp_list);
234 (*wp)->wp_futex = *f;
235 }
236
237 return (error);
238 }
239
/*
 * Put the current thread to sleep on waiter wp of locked futex f for
 * up to 'timeout' ticks (0 means sleep until woken).  On wakeup,
 * handle the case where futex_requeue() moved wp to another futex:
 * drop our reference on the original futex and finish on the new one.
 * Always consumes (futex_put()s) the waiter and the futex reference.
 */
static int
futex_sleep(struct futex *f, struct waiting_proc *wp, unsigned long timeout)
{
	int error;

	FUTEX_ASSERT_LOCKED(f);
	LINUX_CTR4(sys_futex, "futex_sleep enter uaddr %p wp %p timo %ld ref %d",
	    f->f_uaddr, wp, timeout, f->f_refcount);
	/* sx_sleep() atomically releases f_lck and reacquires it on wakeup. */
	error = sx_sleep(wp, &f->f_lck, PCATCH, "futex", timeout);
	if (wp->wp_flags & FUTEX_WP_REQUEUED) {
		KASSERT(f != wp->wp_futex, ("futex != wp_futex"));
		LINUX_CTR5(sys_futex, "futex_sleep out error %d uaddr %p w"
		    " %p requeued uaddr %p ref %d",
		    error, f->f_uaddr, wp, wp->wp_futex->f_uaddr,
		    wp->wp_futex->f_refcount);
		/*
		 * We were requeued: release the old futex and take the
		 * lock of the one we now belong to (futex_requeue()
		 * bumped its refcount on our behalf).
		 */
		futex_put(f, NULL);
		f = wp->wp_futex;
		FUTEX_LOCK(f);
	} else
		LINUX_CTR3(sys_futex, "futex_sleep out error %d uaddr %p wp %p",
		    error, f->f_uaddr, wp);

	futex_put(f, wp);
	return (error);
}
265
266 static int
267 futex_wake(struct futex *f, int n)
268 {
269 struct waiting_proc *wp, *wpt;
270 int count = 0;
271
272 FUTEX_ASSERT_LOCKED(f);
273 TAILQ_FOREACH_SAFE(wp, &f->f_waiting_proc, wp_list, wpt) {
274 LINUX_CTR3(sys_futex, "futex_wake uaddr %p wp %p ref %d",
275 f->f_uaddr, wp, f->f_refcount);
276 wp->wp_flags |= FUTEX_WP_REMOVED;
277 TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
278 wakeup_one(wp);
279 if (++count == n)
280 break;
281 }
282
283 return (count);
284 }
285
/*
 * Wake up to n waiters on futex f, then move up to n2 further waiters
 * onto futex f2.  Both futexes must be locked by the caller.  Returns
 * the total number of waiters processed (woken + requeued).  Each
 * requeued waiter takes a reference on f2, released later when the
 * waiter's thread wakes and futex_put()s it (see futex_sleep()).
 */
static int
futex_requeue(struct futex *f, int n, struct futex *f2, int n2)
{
	struct waiting_proc *wp, *wpt;
	int count = 0;

	FUTEX_ASSERT_LOCKED(f);
	FUTEX_ASSERT_LOCKED(f2);

	TAILQ_FOREACH_SAFE(wp, &f->f_waiting_proc, wp_list, wpt) {
		if (++count <= n) {
			/* First n waiters are simply woken. */
			LINUX_CTR2(sys_futex, "futex_req_wake uaddr %p wp %p",
			    f->f_uaddr, wp);
			wp->wp_flags |= FUTEX_WP_REMOVED;
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
			wakeup_one(wp);
		} else {
			LINUX_CTR3(sys_futex, "futex_requeue uaddr %p wp %p to %p",
			    f->f_uaddr, wp, f2->f_uaddr);
			wp->wp_flags |= FUTEX_WP_REQUEUED;
			/* Move wp to wp_list of f2 futex */
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
			TAILQ_INSERT_HEAD(&f2->f_waiting_proc, wp, wp_list);

			/*
			 * Thread which sleeps on wp after waking should
			 * acquire f2 lock, so increment refcount of f2 to
			 * prevent it from premature deallocation.
			 */
			wp->wp_futex = f2;
			FUTEXES_LOCK;
			++f2->f_refcount;
			FUTEXES_UNLOCK;
			/* Stop after n2 waiters have been requeued. */
			if (count - n >= n2)
				break;
		}
	}

	return (count);
}
326
327 static int
328 futex_wait(struct futex *f, struct waiting_proc *wp, struct l_timespec *ts)
329 {
330 struct l_timespec timeout = {0, 0};
331 struct timeval tv = {0, 0};
332 int timeout_hz;
333 int error;
334
335 if (ts != NULL) {
336 error = copyin(ts, &timeout, sizeof(timeout));
337 if (error)
338 return (error);
339 }
340
341 tv.tv_usec = timeout.tv_sec * 1000000 + timeout.tv_nsec / 1000;
342 timeout_hz = tvtohz(&tv);
343
344 if (timeout.tv_sec == 0 && timeout.tv_nsec == 0)
345 timeout_hz = 0;
346
347 /*
348 * If the user process requests a non null timeout,
349 * make sure we do not turn it into an infinite
350 * timeout because timeout_hz gets null.
351 *
352 * We use a minimal timeout of 1/hz. Maybe it would
353 * make sense to just return ETIMEDOUT without sleeping.
354 */
355 if (((timeout.tv_sec != 0) || (timeout.tv_nsec != 0)) &&
356 (timeout_hz == 0))
357 timeout_hz = 1;
358
359 error = futex_sleep(f, wp, timeout_hz);
360 if (error == EWOULDBLOCK)
361 error = ETIMEDOUT;
362
363 return (error);
364 }
365
/*
 * Decode and execute a FUTEX_WAKE_OP encoded operation on the futex
 * word at uaddr, then evaluate the encoded comparison against the
 * word's previous value.  Returns the boolean comparison result
 * (0 or 1), or a negative errno-style value on failure, following
 * the Linux in-kernel convention.
 *
 * encoded_op layout: op in bits 28-31, cmp in bits 24-27,
 * oparg in bits 12-23, cmparg in bits 0-11.
 */
static int
futex_atomic_op(struct thread *td, int encoded_op, uint32_t *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	/*
	 * The shift pairs sign-extend the 12-bit oparg/cmparg fields.
	 * NOTE(review): this relies on arithmetic right shift of signed
	 * values, which is implementation-defined in C (fine on the
	 * compilers this kernel supports, but worth knowing).
	 */
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval = 0, ret;

	/* FUTEX_OP_OPARG_SHIFT: treat oparg as a shift count (1 << oparg). */
	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

#ifdef DEBUG
	if (ldebug(sys_futex))
		printf("futex_atomic_op: op = %d, cmp = %d, oparg = %x, "
		    "cmparg = %x, uaddr = %p\n",
		    op, cmp, oparg, cmparg, uaddr);
#endif
	/* XXX: linux verifies access here and returns EFAULT */

	/* Apply the operation atomically; oldval gets the prior value. */
	switch (op) {
	case FUTEX_OP_SET:
		ret = futex_xchgl(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_ADD:
		ret = futex_addl(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_OR:
		ret = futex_orl(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_ANDN:
		ret = futex_andl(~oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_XOR:
		ret = futex_xorl(oparg, uaddr, &oldval);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		return (ret);

	/* Evaluate the encoded comparison against the old futex value. */
	switch (cmp) {
	case FUTEX_OP_CMP_EQ:
		return (oldval == cmparg);
	case FUTEX_OP_CMP_NE:
		return (oldval != cmparg);
	case FUTEX_OP_CMP_LT:
		return (oldval < cmparg);
	case FUTEX_OP_CMP_GE:
		return (oldval >= cmparg);
	case FUTEX_OP_CMP_LE:
		return (oldval <= cmparg);
	case FUTEX_OP_CMP_GT:
		return (oldval > cmparg);
	default:
		return (-ENOSYS);
	}
}
427
/*
 * The futex(2) system call entry point: dispatch on args->op to the
 * supported futex operations.  Error paths are careful to futex_put()
 * every futex obtained, since futex_get()/futex_get0() return the
 * futex locked and referenced.
 */
int
linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
{
	int clockrt, nrwake, op_ret, ret, val;
	struct linux_emuldata *em;
	struct waiting_proc *wp;
	struct futex *f, *f2 = NULL;
	int error = 0;

	/*
	 * Our implementation provides only private futexes. Most of the apps
	 * should use private futexes but don't claim so. Therefore we treat
	 * all futexes as private by clearing the FUTEX_PRIVATE_FLAG. It works
	 * in most cases (ie. when futexes are not shared on file descriptor
	 * or between different processes.).
	 */
	args->op = args->op & ~LINUX_FUTEX_PRIVATE_FLAG;

	/*
	 * Currently support for switching between CLOCK_MONOTONIC and
	 * CLOCK_REALTIME is not present. However Linux forbids the use of
	 * FUTEX_CLOCK_REALTIME with any op except FUTEX_WAIT_BITSET and
	 * FUTEX_WAIT_REQUEUE_PI.
	 */
	clockrt = args->op & LINUX_FUTEX_CLOCK_REALTIME;
	args->op = args->op & ~LINUX_FUTEX_CLOCK_REALTIME;
	if (clockrt && args->op != LINUX_FUTEX_WAIT_BITSET &&
	    args->op != LINUX_FUTEX_WAIT_REQUEUE_PI)
		return (ENOSYS);

	switch (args->op) {
	case LINUX_FUTEX_WAIT:

		LINUX_CTR2(sys_futex, "WAIT val %d uaddr %p",
		    args->val, args->uaddr);
#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_wait val %d uaddr %p"),
			    args->val, args->uaddr);
#endif
		error = futex_get(args->uaddr, &wp, &f, FUTEX_CREATE_WP);
		if (error)
			return (error);
		error = copyin(args->uaddr, &val, sizeof(val));
		if (error) {
			LINUX_CTR1(sys_futex, "WAIT copyin failed %d",
			    error);
			futex_put(f, wp);
			return (error);
		}
		/*
		 * Per futex semantics: only sleep if the futex word still
		 * holds the expected value; otherwise report EWOULDBLOCK.
		 */
		if (val != args->val) {
			LINUX_CTR3(sys_futex, "WAIT uaddr %p val %d != uval %d",
			    args->uaddr, args->val, val);
			futex_put(f, wp);
			return (EWOULDBLOCK);
		}

		error = futex_wait(f, wp, args->timeout);
		break;

	case LINUX_FUTEX_WAKE:

		LINUX_CTR2(sys_futex, "WAKE val %d uaddr %p",
		    args->val, args->uaddr);

		/*
		 * XXX: Linux is able to cope with different addresses
		 * corresponding to the same mapped memory in the sleeping
		 * and waker process(es).
		 */
#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_wake val %d uaddr %p"),
			    args->val, args->uaddr);
#endif
		/* FUTEX_DONTCREATE: no waiters means nothing to wake. */
		error = futex_get(args->uaddr, NULL, &f, FUTEX_DONTCREATE);
		if (error)
			return (error);
		if (f == NULL) {
			td->td_retval[0] = 0;
			return (error);
		}
		td->td_retval[0] = futex_wake(f, args->val);
		futex_put(f, NULL);
		break;

	case LINUX_FUTEX_CMP_REQUEUE:

		LINUX_CTR5(sys_futex, "CMP_REQUEUE uaddr %p "
		    "val %d val3 %d uaddr2 %p val2 %d",
		    args->uaddr, args->val, args->val3, args->uaddr2,
		    (int)(unsigned long)args->timeout);

#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_cmp_requeue uaddr %p "
			    "val %d val3 %d uaddr2 %p val2 %d"),
			    args->uaddr, args->val, args->val3, args->uaddr2,
			    (int)(unsigned long)args->timeout);
#endif

		/*
		 * Linux allows this, we would not, it is an incorrect
		 * usage of declared ABI, so return EINVAL.
		 */
		if (args->uaddr == args->uaddr2)
			return (EINVAL);
		error = futex_get0(args->uaddr, &f, 0);
		if (error)
			return (error);

		/*
		 * To avoid deadlocks return EINVAL if second futex
		 * exists at this time. Otherwise create the new futex
		 * and ignore false positive LOR which thus happens.
		 *
		 * Glibc fall back to FUTEX_WAKE in case of any error
		 * returned by FUTEX_CMP_REQUEUE.
		 */
		error = futex_get0(args->uaddr2, &f2, FUTEX_DONTEXISTS);
		if (error) {
			futex_put(f, NULL);
			return (error);
		}
		error = copyin(args->uaddr, &val, sizeof(val));
		if (error) {
			LINUX_CTR1(sys_futex, "CMP_REQUEUE copyin failed %d",
			    error);
			futex_put(f2, NULL);
			futex_put(f, NULL);
			return (error);
		}
		if (val != args->val3) {
			LINUX_CTR2(sys_futex, "CMP_REQUEUE val %d != uval %d",
			    args->val, val);
			futex_put(f2, NULL);
			futex_put(f, NULL);
			return (EAGAIN);
		}

		/*
		 * Per the futex ABI, val2 (number of waiters to requeue)
		 * is smuggled through the timeout argument slot.
		 */
		nrwake = (int)(unsigned long)args->timeout;
		td->td_retval[0] = futex_requeue(f, args->val, f2, nrwake);
		futex_put(f2, NULL);
		futex_put(f, NULL);
		break;

	case LINUX_FUTEX_WAKE_OP:

		LINUX_CTR5(sys_futex, "WAKE_OP "
		    "uaddr %p op %d val %x uaddr2 %p val3 %x",
		    args->uaddr, args->op, args->val,
		    args->uaddr2, args->val3);

#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_wake_op "
			    "uaddr %p op %d val %x uaddr2 %p val3 %x"),
			    args->uaddr, args->op, args->val,
			    args->uaddr2, args->val3);
#endif
		error = futex_get0(args->uaddr, &f, 0);
		if (error)
			return (error);
		/* f2 stays NULL when both addresses name the same futex. */
		if (args->uaddr != args->uaddr2)
			error = futex_get0(args->uaddr2, &f2, 0);
		if (error) {
			futex_put(f, NULL);
			return (error);
		}

		/*
		 * This function returns positive number as results and
		 * negative as errors
		 */
		op_ret = futex_atomic_op(td, args->val3, args->uaddr2);

		if (op_ret < 0) {
			/* XXX: We don't handle the EFAULT yet. */
			if (op_ret != -EFAULT) {
				if (f2 != NULL)
					futex_put(f2, NULL);
				futex_put(f, NULL);
				return (-op_ret);
			}
			if (f2 != NULL)
				futex_put(f2, NULL);
			futex_put(f, NULL);
			return (EFAULT);
		}

		ret = futex_wake(f, args->val);

		/* Comparison succeeded: also wake nrwake waiters on uaddr2. */
		if (op_ret > 0) {
			op_ret = 0;
			nrwake = (int)(unsigned long)args->timeout;

			if (f2 != NULL)
				op_ret += futex_wake(f2, nrwake);
			else
				op_ret += futex_wake(f, nrwake);
			ret += op_ret;

		}
		if (f2 != NULL)
			futex_put(f2, NULL);
		futex_put(f, NULL);
		td->td_retval[0] = ret;
		break;

	case LINUX_FUTEX_LOCK_PI:
		/* not yet implemented */
		linux_msg(td,
			  "linux_sys_futex: "
			  "op LINUX_FUTEX_LOCK_PI not implemented\n");
		return (ENOSYS);

	case LINUX_FUTEX_UNLOCK_PI:
		/* not yet implemented */
		linux_msg(td,
			  "linux_sys_futex: "
			  "op LINUX_FUTEX_UNLOCK_PI not implemented\n");
		return (ENOSYS);

	case LINUX_FUTEX_TRYLOCK_PI:
		/* not yet implemented */
		linux_msg(td,
			  "linux_sys_futex: "
			  "op LINUX_FUTEX_TRYLOCK_PI not implemented\n");
		return (ENOSYS);

	case LINUX_FUTEX_REQUEUE:

		/*
		 * Glibc does not use this operation since version 2.3.3,
		 * as it is racy and replaced by FUTEX_CMP_REQUEUE operation.
		 * Glibc versions prior to 2.3.3 fall back to FUTEX_WAKE when
		 * FUTEX_REQUEUE returned EINVAL.
		 */
		em = em_find(td->td_proc, EMUL_DONTLOCK);
		/* Warn only once per process about the unsupported op. */
		if (em->used_requeue == 0) {
			linux_msg(td,
				  "linux_sys_futex: "
				  "unsupported futex_requeue op\n");
			em->used_requeue = 1;
		}
		return (EINVAL);

	case LINUX_FUTEX_WAIT_BITSET:
		/* not yet implemented */
		linux_msg(td,
			  "linux_sys_futex: "
			  "op FUTEX_WAIT_BITSET not implemented\n");
		return (ENOSYS);

	case LINUX_FUTEX_WAIT_REQUEUE_PI:
		/* not yet implemented */
		linux_msg(td,
			  "linux_sys_futex: "
			  "op FUTEX_WAIT_REQUEUE_PI not implemented\n");
		return (ENOSYS);

	default:
		linux_msg(td,
			  "linux_sys_futex: unknown op %d\n", args->op);
		return (ENOSYS);
	}

	return (error);
}
697
698 int
699 linux_set_robust_list(struct thread *td, struct linux_set_robust_list_args *args)
700 {
701 struct linux_emuldata *em;
702
703 #ifdef DEBUG
704 if (ldebug(set_robust_list))
705 printf(ARGS(set_robust_list, "head %p len %d"),
706 args->head, args->len);
707 #endif
708
709 if (args->len != sizeof(struct linux_robust_list_head))
710 return (EINVAL);
711
712 em = em_find(td->td_proc, EMUL_DOLOCK);
713 em->robust_futexes = args->head;
714 EMUL_UNLOCK(&emul_lock);
715
716 return (0);
717 }
718
719 int
720 linux_get_robust_list(struct thread *td, struct linux_get_robust_list_args *args)
721 {
722 struct linux_emuldata *em;
723 struct linux_robust_list_head *head;
724 l_size_t len = sizeof(struct linux_robust_list_head);
725 int error = 0;
726
727 #ifdef DEBUG
728 if (ldebug(get_robust_list))
729 printf(ARGS(get_robust_list, ""));
730 #endif
731
732 if (!args->pid) {
733 em = em_find(td->td_proc, EMUL_DONTLOCK);
734 head = em->robust_futexes;
735 } else {
736 struct proc *p;
737
738 p = pfind(args->pid);
739 if (p == NULL)
740 return (ESRCH);
741
742 em = em_find(p, EMUL_DONTLOCK);
743 /* XXX: ptrace? */
744 if (priv_check(td, PRIV_CRED_SETUID) ||
745 priv_check(td, PRIV_CRED_SETEUID) ||
746 p_candebug(td, p)) {
747 PROC_UNLOCK(p);
748 return (EPERM);
749 }
750 head = em->robust_futexes;
751
752 PROC_UNLOCK(p);
753 }
754
755 error = copyout(&len, args->len, sizeof(l_size_t));
756 if (error)
757 return (EFAULT);
758
759 error = copyout(head, args->head, sizeof(struct linux_robust_list_head));
760
761 return (error);
762 }
763
/*
 * Handle one entry of an exiting process's robust futex list: if the
 * futex word at uaddr records this process as owner, atomically set
 * FUTEX_OWNER_DIED (preserving FUTEX_WAITERS) and wake one waiter so
 * it can observe the dead owner.
 */
static int
handle_futex_death(struct proc *p, uint32_t *uaddr, int pi)
{
	uint32_t uval, nval, mval;
	struct futex *f;
	int error;

retry:
	if (copyin(uaddr, &uval, 4))
		return (EFAULT);
	/* Only act when the exiting process is the recorded owner. */
	if ((uval & FUTEX_TID_MASK) == p->p_pid) {
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		nval = casuword32(uaddr, uval, mval);

		/*
		 * NOTE(review): casuword32() signals a fault with -1, but
		 * 0xffffffff is also a representable futex value, so an
		 * all-ones word would be misreported as EFAULT here.
		 */
		if (nval == -1)
			return (EFAULT);

		/* Lost a race with a concurrent update; re-read and retry. */
		if (nval != uval)
			goto retry;

		/* Wake one waiter (non-PI only; PI wakeups unsupported). */
		if (!pi && (uval & FUTEX_WAITERS)) {
			error = futex_get(uaddr, NULL, &f,
			    FUTEX_DONTCREATE);
			if (error)
				return (error);
			if (f != NULL) {
				futex_wake(f, 1);
				futex_put(f, NULL);
			}
		}
	}

	return (0);
}
798
799 static int
800 fetch_robust_entry(struct linux_robust_list **entry,
801 struct linux_robust_list **head, int *pi)
802 {
803 l_ulong uentry;
804
805 if (copyin((const void *)head, &uentry, sizeof(l_ulong)))
806 return (EFAULT);
807
808 *entry = (void *)(uentry & ~1UL);
809 *pi = uentry & 1;
810
811 return (0);
812 }
813
814 /* This walks the list of robust futexes releasing them. */
815 void
816 release_futexes(struct proc *p)
817 {
818 struct linux_robust_list_head *head = NULL;
819 struct linux_robust_list *entry, *next_entry, *pending;
820 unsigned int limit = 2048, pi, next_pi, pip;
821 struct linux_emuldata *em;
822 l_long futex_offset;
823 int rc;
824
825 em = em_find(p, EMUL_DONTLOCK);
826 head = em->robust_futexes;
827
828 if (head == NULL)
829 return;
830
831 if (fetch_robust_entry(&entry, PTRIN(&head->list.next), &pi))
832 return;
833
834 if (copyin(&head->futex_offset, &futex_offset, sizeof(futex_offset)))
835 return;
836
837 if (fetch_robust_entry(&pending, PTRIN(&head->pending_list), &pip))
838 return;
839
840 while (entry != &head->list) {
841 rc = fetch_robust_entry(&next_entry, PTRIN(&entry->next), &next_pi);
842
843 if (entry != pending)
844 if (handle_futex_death(p, (uint32_t *)entry + futex_offset, pi))
845 return;
846 if (rc)
847 return;
848
849 entry = next_entry;
850 pi = next_pi;
851
852 if (!--limit)
853 break;
854
855 sched_relinquish(curthread);
856 }
857
858 if (pending)
859 handle_futex_death(p, (uint32_t *)pending + futex_offset, pip);
860 }
Cache object: a553010056419c0a8e54f445f7fee338
|