/*	$NetBSD: linux_sched.c,v 1.79 2021/09/07 11:43:04 riastradh Exp $	*/

/*-
 * Copyright (c) 1999, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center; by Matthias Scheler.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Linux compatibility module. Try to deal with scheduler related syscalls.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_sched.c,v 1.79 2021/09/07 11:43:04 riastradh Exp $");

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/syscallargs.h>
#include <sys/wait.h>
#include <sys/kauth.h>
#include <sys/ptrace.h>
#include <sys/atomic.h>

#include <sys/cpu.h>

#include <compat/linux/common/linux_types.h>
#include <compat/linux/common/linux_signal.h>
#include <compat/linux/common/linux_emuldata.h>
#include <compat/linux/common/linux_ipc.h>
#include <compat/linux/common/linux_sem.h>
#include <compat/linux/common/linux_exec.h>
#include <compat/linux/common/linux_machdep.h>

#include <compat/linux/linux_syscallargs.h>

#include <compat/linux/common/linux_sched.h>

static int linux_clone_nptl(struct lwp *, const struct linux_sys_clone_args *,
    register_t *);

/* Unlike Linux, dynamically calculate CPU mask size */
#define	LINUX_CPU_MASK_SIZE (sizeof(long) * ((ncpu + LONG_BIT - 1) / LONG_BIT))

#ifdef DEBUG_LINUX
#define DPRINTF(x) uprintf x
#else
#define DPRINTF(x)
#endif

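/*
 * Child-side return hook used by linux_sys_clone(): if the parent asked
 * for LINUX_CLONE_CHILD_SETTID, write the child's PID to the user address
 * recorded in the emuldata before taking the normal child_return() path.
 */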
static void
linux_child_return(void *arg)
{
	struct lwp *l = arg;
	struct proc *p = l->l_proc;
	struct linux_emuldata *led = l->l_emuldata;
	void *ctp = led->led_child_tidptr;
	int error;

	if (ctp) {
		if ((error = copyout(&p->p_pid, ctp, sizeof(p->p_pid))) != 0)
			printf("%s: LINUX_CLONE_CHILD_SETTID "
			    "failed (child_tidptr = %p, tid = %d error =%d)\n",
			    __func__, ctp, p->p_pid, error);
	}
	child_return(arg);
}

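/*
 * clone(2): thread-group clones (LINUX_CLONE_THREAD) are dispatched to
 * the NPTL path below; everything else is translated into fork1() flags.
 */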
int
linux_sys_clone(struct lwp *l, const struct linux_sys_clone_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) flags;
		syscallarg(void *) stack;
		syscallarg(void *) parent_tidptr;
		syscallarg(void *) tls;
		syscallarg(void *) child_tidptr;
	} */
	struct linux_emuldata *led;
	int flags, sig, error;

	/*
	 * We don't support the Linux CLONE_PID or CLONE_PTRACE flags.
	 */
	if (SCARG(uap, flags) & (LINUX_CLONE_PID|LINUX_CLONE_PTRACE))
		return EINVAL;

	/*
	 * Thread group implies shared signals. Shared signals
	 * imply shared VM. This matches what Linux kernel does.
	 */
	if (SCARG(uap, flags) & LINUX_CLONE_THREAD
	    && (SCARG(uap, flags) & LINUX_CLONE_SIGHAND) == 0)
		return EINVAL;
	if (SCARG(uap, flags) & LINUX_CLONE_SIGHAND
	    && (SCARG(uap, flags) & LINUX_CLONE_VM) == 0)
		return EINVAL;

	/*
	 * The thread group flavor is implemented totally differently.
	 */
	if (SCARG(uap, flags) & LINUX_CLONE_THREAD)
		return linux_clone_nptl(l, uap, retval);

	flags = 0;
	if (SCARG(uap, flags) & LINUX_CLONE_VM)
		flags |= FORK_SHAREVM;
	if (SCARG(uap, flags) & LINUX_CLONE_FS)
		flags |= FORK_SHARECWD;
	if (SCARG(uap, flags) & LINUX_CLONE_FILES)
		flags |= FORK_SHAREFILES;
	if (SCARG(uap, flags) & LINUX_CLONE_SIGHAND)
		flags |= FORK_SHARESIGS;
	if (SCARG(uap, flags) & LINUX_CLONE_VFORK)
		flags |= FORK_PPWAIT;

	sig = SCARG(uap, flags) & LINUX_CLONE_CSIGNAL;
	if (sig < 0 || sig >= LINUX__NSIG)
		return EINVAL;
	sig = linux_to_native_signo[sig];

	if (SCARG(uap, flags) & LINUX_CLONE_CHILD_SETTID) {
		led = l->l_emuldata;
		led->led_child_tidptr = SCARG(uap, child_tidptr);
	}

	/*
	 * Note that Linux does not provide a portable way of specifying
	 * the stack area; the caller must know if the stack grows up
	 * or down.  So, we pass a stack size of 0, so that the code
	 * that makes this adjustment is a noop.
	 */
	if ((error = fork1(l, flags, sig, SCARG(uap, stack), 0,
	    linux_child_return, NULL, retval)) != 0) {
		DPRINTF(("%s: fork1: error %d\n", __func__, error));
		return error;
	}

	return 0;
}

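/*
 * NPTL-style clone: create a new LWP in the calling process rather than
 * forking, then handle the PARENT_SETTID/CHILD_SETTID/CHILD_CLEARTID TID
 * bookkeeping and the SETTLS thread pointer before starting the LWP.
 */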
static int
linux_clone_nptl(struct lwp *l, const struct linux_sys_clone_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) flags;
		syscallarg(void *) stack;
		syscallarg(void *) parent_tidptr;
		syscallarg(void *) tls;
		syscallarg(void *) child_tidptr;
	} */
	struct proc *p;
	struct lwp *l2;
	struct linux_emuldata *led;
	void *parent_tidptr, *tls, *child_tidptr;
	vaddr_t uaddr;
	lwpid_t lid;
	int flags, error;

	p = l->l_proc;
	flags = SCARG(uap, flags);
	parent_tidptr = SCARG(uap, parent_tidptr);
	tls = SCARG(uap, tls);
	child_tidptr = SCARG(uap, child_tidptr);

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0)) {
		return ENOMEM;
	}

	error = lwp_create(l, p, uaddr, LWP_DETACHED,
	    SCARG(uap, stack), 0, child_return, NULL, &l2, l->l_class,
	    &l->l_sigmask, &l->l_sigstk);
	if (__predict_false(error)) {
		DPRINTF(("%s: lwp_create error=%d\n", __func__, error));
		uvm_uarea_free(uaddr);
		return error;
	}
	lid = l2->l_lid;

	/* LINUX_CLONE_CHILD_CLEARTID: clear TID in child's memory on exit() */
	if (flags & LINUX_CLONE_CHILD_CLEARTID) {
		led = l2->l_emuldata;
		led->led_clear_tid = child_tidptr;
	}

	/* LINUX_CLONE_PARENT_SETTID: store child's TID in parent's memory */
	if (flags & LINUX_CLONE_PARENT_SETTID) {
		if ((error = copyout(&lid, parent_tidptr, sizeof(lid))) != 0)
			printf("%s: LINUX_CLONE_PARENT_SETTID "
			    "failed (parent_tidptr = %p tid = %d error=%d)\n",
			    __func__, parent_tidptr, lid, error);
	}

	/* LINUX_CLONE_CHILD_SETTID: store child's TID in child's memory */
	if (flags & LINUX_CLONE_CHILD_SETTID) {
		if ((error = copyout(&lid, child_tidptr, sizeof(lid))) != 0)
			printf("%s: LINUX_CLONE_CHILD_SETTID "
			    "failed (child_tidptr = %p, tid = %d error=%d)\n",
			    __func__, child_tidptr, lid, error);
	}

	if (flags & LINUX_CLONE_SETTLS) {
		error = LINUX_LWP_SETPRIVATE(l2, tls);
		if (error) {
			DPRINTF(("%s: LINUX_LWP_SETPRIVATE %d\n", __func__,
			    error));
			lwp_exit(l2);
			return error;
		}
	}

	/* Set the new LWP running. */
	lwp_start(l2, 0);

	retval[0] = lid;
	retval[1] = 0;
	return 0;
}

/*
 * linux realtime priority
 *
 * - SCHED_RR and SCHED_FIFO tasks have priorities [1,99].
 *
 * - SCHED_OTHER tasks don't have realtime priorities.
 *   in particular, sched_param::sched_priority is always 0.
 */

#define	LINUX_SCHED_RTPRIO_MIN	1
#define	LINUX_SCHED_RTPRIO_MAX	99

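/*
 * Convert a Linux scheduling policy and parameters to their native
 * equivalents.  A NULL linux_params converts the policy only; realtime
 * priorities are scaled linearly from [1,99] into the native
 * [SCHED_PRI_MIN, SCHED_PRI_MAX] range.
 */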
static int
sched_linux2native(int linux_policy, struct linux_sched_param *linux_params,
    int *native_policy, struct sched_param *native_params)
{

	switch (linux_policy) {
	case LINUX_SCHED_OTHER:
		if (native_policy != NULL) {
			*native_policy = SCHED_OTHER;
		}
		break;

	case LINUX_SCHED_FIFO:
		if (native_policy != NULL) {
			*native_policy = SCHED_FIFO;
		}
		break;

	case LINUX_SCHED_RR:
		if (native_policy != NULL) {
			*native_policy = SCHED_RR;
		}
		break;

	default:
		return EINVAL;
	}

	if (linux_params != NULL) {
		int prio = linux_params->sched_priority;

		KASSERT(native_params != NULL);

		if (linux_policy == LINUX_SCHED_OTHER) {
			if (prio != 0) {
				return EINVAL;
			}
			native_params->sched_priority = PRI_NONE; /* XXX */
		} else {
			if (prio < LINUX_SCHED_RTPRIO_MIN ||
			    prio > LINUX_SCHED_RTPRIO_MAX) {
				return EINVAL;
			}
			native_params->sched_priority =
			    (prio - LINUX_SCHED_RTPRIO_MIN)
			    * (SCHED_PRI_MAX - SCHED_PRI_MIN)
			    / (LINUX_SCHED_RTPRIO_MAX - LINUX_SCHED_RTPRIO_MIN)
			    + SCHED_PRI_MIN;
		}
	}

	return 0;
}

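/*
 * Convert a native scheduling policy and parameters to their Linux
 * equivalents.  This is the inverse of sched_linux2native(); an unknown
 * native policy indicates a kernel bug, hence the panic.
 */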
static int
sched_native2linux(int native_policy, struct sched_param *native_params,
    int *linux_policy, struct linux_sched_param *linux_params)
{

	switch (native_policy) {
	case SCHED_OTHER:
		if (linux_policy != NULL) {
			*linux_policy = LINUX_SCHED_OTHER;
		}
		break;

	case SCHED_FIFO:
		if (linux_policy != NULL) {
			*linux_policy = LINUX_SCHED_FIFO;
		}
		break;

	case SCHED_RR:
		if (linux_policy != NULL) {
			*linux_policy = LINUX_SCHED_RR;
		}
		break;

	default:
		panic("%s: unknown policy %d\n", __func__, native_policy);
	}

	if (native_params != NULL) {
		int prio = native_params->sched_priority;

		KASSERT(prio >= SCHED_PRI_MIN);
		KASSERT(prio <= SCHED_PRI_MAX);
		KASSERT(linux_params != NULL);

		memset(linux_params, 0, sizeof(*linux_params));

		DPRINTF(("%s: native: policy %d, priority %d\n",
		    __func__, native_policy, prio));

		if (native_policy == SCHED_OTHER) {
			linux_params->sched_priority = 0;
		} else {
			linux_params->sched_priority =
			    (prio - SCHED_PRI_MIN)
			    * (LINUX_SCHED_RTPRIO_MAX - LINUX_SCHED_RTPRIO_MIN)
			    / (SCHED_PRI_MAX - SCHED_PRI_MIN)
			    + LINUX_SCHED_RTPRIO_MIN;
		}
		DPRINTF(("%s: linux: policy %d, priority %d\n",
		    __func__, -1, linux_params->sched_priority));
	}

	return 0;
}

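/*
 * sched_setparam(2) keeps the target's current policy, so fetch the
 * native policy first, convert it to Linux terms, and validate the new
 * parameters against that policy before applying them.
 */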
int
linux_sys_sched_setparam(struct lwp *l, const struct linux_sys_sched_setparam_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux_pid_t) pid;
		syscallarg(const struct linux_sched_param *) sp;
	} */
	int error, policy;
	struct linux_sched_param lp;
	struct sched_param sp;

	if (SCARG(uap, pid) < 0 || SCARG(uap, sp) == NULL) {
		error = EINVAL;
		goto out;
	}

	error = copyin(SCARG(uap, sp), &lp, sizeof(lp));
	if (error)
		goto out;

	/* We need the current policy in Linux terms. */
	error = do_sched_getparam(SCARG(uap, pid), 0, &policy, NULL);
	if (error)
		goto out;
	error = sched_native2linux(policy, NULL, &policy, NULL);
	if (error)
		goto out;

	error = sched_linux2native(policy, &lp, &policy, &sp);
	if (error)
		goto out;

	error = do_sched_setparam(SCARG(uap, pid), 0, policy, &sp);
	if (error)
		goto out;

 out:
	return error;
}

int
linux_sys_sched_getparam(struct lwp *l, const struct linux_sys_sched_getparam_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux_pid_t) pid;
		syscallarg(struct linux_sched_param *) sp;
	} */
	struct linux_sched_param lp;
	struct sched_param sp;
	int error, policy;

	if (SCARG(uap, pid) < 0 || SCARG(uap, sp) == NULL) {
		error = EINVAL;
		goto out;
	}

	error = do_sched_getparam(SCARG(uap, pid), 0, &policy, &sp);
	if (error)
		goto out;
	DPRINTF(("%s: native: policy %d, priority %d\n",
	    __func__, policy, sp.sched_priority));

	error = sched_native2linux(policy, &sp, NULL, &lp);
	if (error)
		goto out;
	DPRINTF(("%s: linux: policy %d, priority %d\n",
	    __func__, policy, lp.sched_priority));

	error = copyout(&lp, SCARG(uap, sp), sizeof(lp));
	if (error)
		goto out;

 out:
	return error;
}

int
linux_sys_sched_setscheduler(struct lwp *l, const struct linux_sys_sched_setscheduler_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux_pid_t) pid;
		syscallarg(int) policy;
		syscallarg(const struct linux_sched_param *) sp;
	} */
	int error, policy;
	struct linux_sched_param lp;
	struct sched_param sp;

	if (SCARG(uap, pid) < 0 || SCARG(uap, sp) == NULL) {
		error = EINVAL;
		goto out;
	}

	error = copyin(SCARG(uap, sp), &lp, sizeof(lp));
	if (error)
		goto out;
	DPRINTF(("%s: linux: policy %d, priority %d\n",
	    __func__, SCARG(uap, policy), lp.sched_priority));

	error = sched_linux2native(SCARG(uap, policy), &lp, &policy, &sp);
	if (error)
		goto out;
	DPRINTF(("%s: native: policy %d, priority %d\n",
	    __func__, policy, sp.sched_priority));

	error = do_sched_setparam(SCARG(uap, pid), 0, policy, &sp);
	if (error)
		goto out;

 out:
	return error;
}

int
linux_sys_sched_getscheduler(struct lwp *l, const struct linux_sys_sched_getscheduler_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux_pid_t) pid;
	} */
	int error, policy;

	*retval = -1;

	error = do_sched_getparam(SCARG(uap, pid), 0, &policy, NULL);
	if (error)
		goto out;

	error = sched_native2linux(policy, NULL, &policy, NULL);
	if (error)
		goto out;

	*retval = policy;

 out:
	return error;
}

int
linux_sys_sched_yield(struct lwp *l, const void *v, register_t *retval)
{

	yield();
	return 0;
}

int
linux_sys_sched_get_priority_max(struct lwp *l, const struct linux_sys_sched_get_priority_max_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) policy;
	} */

	switch (SCARG(uap, policy)) {
	case LINUX_SCHED_OTHER:
		*retval = 0;
		break;
	case LINUX_SCHED_FIFO:
	case LINUX_SCHED_RR:
		*retval = LINUX_SCHED_RTPRIO_MAX;
		break;
	default:
		return EINVAL;
	}

	return 0;
}

int
linux_sys_sched_get_priority_min(struct lwp *l, const struct linux_sys_sched_get_priority_min_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) policy;
	} */

	switch (SCARG(uap, policy)) {
	case LINUX_SCHED_OTHER:
		*retval = 0;
		break;
	case LINUX_SCHED_FIFO:
	case LINUX_SCHED_RR:
		*retval = LINUX_SCHED_RTPRIO_MIN;
		break;
	default:
		return EINVAL;
	}

	return 0;
}

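/*
 * Linux exit(2) terminates only the calling thread, so it maps to
 * lwp_exit(); exit_group(2) below terminates the whole process.
 */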
int
linux_sys_exit(struct lwp *l, const struct linux_sys_exit_args *uap, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

#ifndef __m68k__
/* Present on everything but m68k */
int
linux_sys_exit_group(struct lwp *l, const struct linux_sys_exit_group_args *uap, register_t *retval)
{

	return sys_exit(l, (const void *)uap, retval);
}
#endif /* !__m68k__ */

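/*
 * set_tid_address(2): record the user address that should be cleared when
 * this LWP exits (handled by the emulation's LWP-exit hook via
 * led_clear_tid), and return the caller's TID.
 */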
int
linux_sys_set_tid_address(struct lwp *l, const struct linux_sys_set_tid_address_args *uap, register_t *retval)
{
	/* {
		syscallarg(int *) tid;
	} */
	struct linux_emuldata *led;

	led = (struct linux_emuldata *)l->l_emuldata;
	led->led_clear_tid = SCARG(uap, tid);
	*retval = l->l_lid;

	return 0;
}

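/*
 * gettid(2): Linux thread IDs correspond directly to NetBSD LWP IDs.
 */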
/* ARGSUSED1 */
int
linux_sys_gettid(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

/*
 * The affinity syscalls assume that the layout of our cpu kcpuset is
 * the same as linux's: a linear bitmask.
 */
int
linux_sys_sched_getaffinity(struct lwp *l, const struct linux_sys_sched_getaffinity_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux_pid_t) pid;
		syscallarg(unsigned int) len;
		syscallarg(unsigned long *) mask;
	} */
	struct proc *p;
	struct lwp *t;
	kcpuset_t *kcset;
	size_t size;
	cpuid_t i;
	int error;

	size = LINUX_CPU_MASK_SIZE;
	if (SCARG(uap, len) < size)
		return EINVAL;

	if (SCARG(uap, pid) == 0) {
		p = curproc;
		mutex_enter(p->p_lock);
		t = curlwp;
	} else {
		t = lwp_find2(-1, SCARG(uap, pid));
		if (__predict_false(t == NULL)) {
			return ESRCH;
		}
		p = t->l_proc;
		KASSERT(mutex_owned(p->p_lock));
	}

	/* Check the permission */
	if (kauth_authorize_process(l->l_cred,
	    KAUTH_PROCESS_SCHEDULER_GETAFFINITY, p, NULL, NULL, NULL)) {
		mutex_exit(p->p_lock);
		return EPERM;
	}

	kcpuset_create(&kcset, true);
	lwp_lock(t);
	if (t->l_affinity != NULL)
		kcpuset_copy(kcset, t->l_affinity);
	else {
		/*
		 * All available CPUs should be masked when affinity has not
		 * been set.
		 */
		kcpuset_zero(kcset);
		for (i = 0; i < ncpu; i++)
			kcpuset_set(kcset, i);
	}
	lwp_unlock(t);
	mutex_exit(p->p_lock);
	error = kcpuset_copyout(kcset, (cpuset_t *)SCARG(uap, mask), size);
	kcpuset_unuse(kcset, NULL);
	*retval = size;
	return error;
}

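/*
 * sched_setaffinity(2): the Linux "pid" argument may actually be a TID,
 * so resolve it to a native pid/lid pair and hand the request to the
 * native sys__sched_setaffinity() implementation.
 */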
int
linux_sys_sched_setaffinity(struct lwp *l, const struct linux_sys_sched_setaffinity_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux_pid_t) pid;
		syscallarg(unsigned int) len;
		syscallarg(unsigned long *) mask;
	} */
	struct sys__sched_setaffinity_args ssa;
	size_t size;
	pid_t pid;
	lwpid_t lid;

	size = LINUX_CPU_MASK_SIZE;
	if (SCARG(uap, len) < size)
		return EINVAL;

	lid = SCARG(uap, pid);
	if (lid != 0) {
		/* Get the canonical PID for the process. */
		mutex_enter(&proc_lock);
		struct proc *p = proc_find_lwpid(SCARG(uap, pid));
		if (p == NULL) {
			mutex_exit(&proc_lock);
			return ESRCH;
		}
		pid = p->p_pid;
		mutex_exit(&proc_lock);
	} else {
		pid = curproc->p_pid;
		lid = curlwp->l_lid;
	}

	SCARG(&ssa, pid) = pid;
	SCARG(&ssa, lid) = lid;
	SCARG(&ssa, size) = size;
	SCARG(&ssa, cpuset) = (cpuset_t *)SCARG(uap, mask);

	return sys__sched_setaffinity(l, &ssa, retval);
}