/*-
 * Copyright (c) 2004 Tim J. Robbins
 * Copyright (c) 2002 Doug Rabson
 * Copyright (c) 2000 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/unistd.h>

#include <machine/frame.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <amd64/linux32/linux.h>
#include <amd64/linux32/linux32_proto.h>
#include <compat/linux/linux_ipc.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_util.h>

struct l_old_select_argv {
	l_int nfds;
	l_uintptr_t readfds;
	l_uintptr_t writefds;
	l_uintptr_t exceptfds;
	l_uintptr_t timeout;
} __packed;

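/*
 * Translate the Linux SS_DISABLE/SS_ONSTACK sigaltstack flags to their
 * FreeBSD counterparts; bsd_to_linux_sigaltstack() below performs the
 * reverse translation.
 */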
int
linux_to_bsd_sigaltstack(int lsa)
{
	int bsa = 0;

	if (lsa & LINUX_SS_DISABLE)
		bsa |= SS_DISABLE;
	if (lsa & LINUX_SS_ONSTACK)
		bsa |= SS_ONSTACK;
	return (bsa);
}

int
bsd_to_linux_sigaltstack(int bsa)
{
	int lsa = 0;

	if (bsa & SS_DISABLE)
		lsa |= LINUX_SS_DISABLE;
	if (bsa & SS_ONSTACK)
		lsa |= LINUX_SS_ONSTACK;
	return (lsa);
}

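/*
 * execve(2) for 32-bit Linux binaries: argv and envp arrive as arrays of
 * 32-bit user pointers, so each entry is widened to a native pointer in
 * the stack gap before the request is handed to the native execve().
 */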
int
linux_execve(struct thread *td, struct linux_execve_args *args)
{
	struct execve_args ap;
	caddr_t sg;
	int error;
	u_int32_t *p32, arg;
	char **p, *p64;
	int count;

	sg = stackgap_init();
	CHECKALTEXIST(td, &sg, args->path);

#ifdef DEBUG
	if (ldebug(execve))
		printf(ARGS(execve, "%s"), args->path);
#endif

	ap.fname = args->path;

	if (args->argp != NULL) {
		count = 0;
		p32 = (u_int32_t *)args->argp;
		do {
			error = copyin(p32++, &arg, sizeof(arg));
			if (error)
				return error;
			count++;
		} while (arg != 0);
		p = stackgap_alloc(&sg, count * sizeof(char *));
		ap.argv = p;
		p32 = (u_int32_t *)args->argp;
		do {
			error = copyin(p32++, &arg, sizeof(arg));
			if (error)
				return error;
			p64 = PTRIN(arg);
			error = copyout(&p64, p++, sizeof(p64));
			if (error)
				return error;
		} while (arg != 0);
	}
	if (args->envp != NULL) {
		count = 0;
		p32 = (u_int32_t *)args->envp;
		do {
			error = copyin(p32++, &arg, sizeof(arg));
			if (error)
				return error;
			count++;
		} while (arg != 0);
		p = stackgap_alloc(&sg, count * sizeof(char *));
		ap.envv = p;
		p32 = (u_int32_t *)args->envp;
		do {
			error = copyin(p32++, &arg, sizeof(arg));
			if (error)
				return error;
			p64 = PTRIN(arg);
			error = copyout(&p64, p++, sizeof(p64));
			if (error)
				return error;
		} while (arg != 0);
	}

	return (execve(td, &ap));
}

struct iovec32 {
	u_int32_t iov_base;
	int iov_len;
};

CTASSERT(sizeof(struct iovec32) == 8);

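/*
 * Copy in an array of 32-bit iovecs from user space and build the
 * equivalent native struct uio.  On success the caller owns the returned
 * uio and must release it with free(uio, M_IOV).
 */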
static int
linux32_copyinuio(struct iovec32 *iovp, u_int iovcnt, struct uio **uiop)
{
	struct iovec32 iov32;
	struct iovec *iov;
	struct uio *uio;
	u_int iovlen;
	int error, i;

	*uiop = NULL;
	if (iovcnt > UIO_MAXIOV)
		return (EINVAL);
	iovlen = iovcnt * sizeof(struct iovec);
	uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
	iov = (struct iovec *)(uio + 1);
	for (i = 0; i < iovcnt; i++) {
		error = copyin(&iovp[i], &iov32, sizeof(struct iovec32));
		if (error) {
			free(uio, M_IOV);
			return (error);
		}
		iov[i].iov_base = PTRIN(iov32.iov_base);
		iov[i].iov_len = iov32.iov_len;
	}
	uio->uio_iov = iov;
	uio->uio_iovcnt = iovcnt;
	uio->uio_segflg = UIO_USERSPACE;
	uio->uio_offset = -1;
	uio->uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		if (iov->iov_len > INT_MAX - uio->uio_resid) {
			free(uio, M_IOV);
			return (EINVAL);
		}
		uio->uio_resid += iov->iov_len;
		iov++;
	}
	*uiop = uio;
	return (0);
}

int
linux_readv(struct thread *td, struct linux_readv_args *uap)
{
	struct uio *auio;
	int error;

	error = linux32_copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_readv(td, uap->fd, auio);
	free(auio, M_IOV);
	return (error);
}

int
linux_writev(struct thread *td, struct linux_writev_args *uap)
{
	struct uio *auio;
	int error;

	error = linux32_copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_writev(td, uap->fd, auio);
	free(auio, M_IOV);
	return (error);
}

struct l_ipc_kludge {
	l_uintptr_t msgp;
	l_long msgtyp;
} __packed;

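/*
 * Demultiplex the Linux ipc(2) system call: the low 16 bits of "what"
 * select the System V IPC operation, and the generic arguments are
 * repacked into the argument structure of the matching linux_*() handler.
 */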
int
linux_ipc(struct thread *td, struct linux_ipc_args *args)
{

	switch (args->what & 0xFFFF) {
	case LINUX_SEMOP: {
		struct linux_semop_args a;

		a.semid = args->arg1;
		a.tsops = args->ptr;
		a.nsops = args->arg2;
		return (linux_semop(td, &a));
	}
	case LINUX_SEMGET: {
		struct linux_semget_args a;

		a.key = args->arg1;
		a.nsems = args->arg2;
		a.semflg = args->arg3;
		return (linux_semget(td, &a));
	}
	case LINUX_SEMCTL: {
		struct linux_semctl_args a;
		int error;

		a.semid = args->arg1;
		a.semnum = args->arg2;
		a.cmd = args->arg3;
		error = copyin(args->ptr, &a.arg, sizeof(a.arg));
		if (error)
			return (error);
		return (linux_semctl(td, &a));
	}
	case LINUX_MSGSND: {
		struct linux_msgsnd_args a;

		a.msqid = args->arg1;
		a.msgp = args->ptr;
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		return (linux_msgsnd(td, &a));
	}
	case LINUX_MSGRCV: {
		struct linux_msgrcv_args a;

		a.msqid = args->arg1;
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		if ((args->what >> 16) == 0) {
			struct l_ipc_kludge tmp;
			int error;

			if (args->ptr == 0)
				return (EINVAL);
			error = copyin(args->ptr, &tmp, sizeof(tmp));
			if (error)
				return (error);
			a.msgp = PTRIN(tmp.msgp);
			a.msgtyp = tmp.msgtyp;
		} else {
			a.msgp = args->ptr;
			a.msgtyp = args->arg5;
		}
		return (linux_msgrcv(td, &a));
	}
	case LINUX_MSGGET: {
		struct linux_msgget_args a;

		a.key = args->arg1;
		a.msgflg = args->arg2;
		return (linux_msgget(td, &a));
	}
	case LINUX_MSGCTL: {
		struct linux_msgctl_args a;

		a.msqid = args->arg1;
		a.cmd = args->arg2;
		a.buf = args->ptr;
		return (linux_msgctl(td, &a));
	}
	case LINUX_SHMAT: {
		struct linux_shmat_args a;

		a.shmid = args->arg1;
		a.shmaddr = args->ptr;
		a.shmflg = args->arg2;
		a.raddr = PTRIN((l_uint)args->arg3);
		return (linux_shmat(td, &a));
	}
	case LINUX_SHMDT: {
		struct linux_shmdt_args a;

		a.shmaddr = args->ptr;
		return (linux_shmdt(td, &a));
	}
	case LINUX_SHMGET: {
		struct linux_shmget_args a;

		a.key = args->arg1;
		a.size = args->arg2;
		a.shmflg = args->arg3;
		return (linux_shmget(td, &a));
	}
	case LINUX_SHMCTL: {
		struct linux_shmctl_args a;

		a.shmid = args->arg1;
		a.cmd = args->arg2;
		a.buf = args->ptr;
		return (linux_shmctl(td, &a));
	}
	default:
		break;
	}

	return (EINVAL);
}

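/*
 * Old-style select(2), which passes its five arguments indirectly through
 * a single user pointer.  Copy the argument block in and forward it to
 * linux_select().
 */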
int
linux_old_select(struct thread *td, struct linux_old_select_args *args)
{
	struct l_old_select_argv linux_args;
	struct linux_select_args newsel;
	int error;

#ifdef DEBUG
	if (ldebug(old_select))
		printf(ARGS(old_select, "%p"), args->ptr);
#endif

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

	newsel.nfds = linux_args.nfds;
	newsel.readfds = PTRIN(linux_args.readfds);
	newsel.writefds = PTRIN(linux_args.writefds);
	newsel.exceptfds = PTRIN(linux_args.exceptfds);
	newsel.timeout = PTRIN(linux_args.timeout);
	return (linux_select(td, &newsel));
}

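/*
 * fork(2): use the native fork(); the child is identified by
 * td_retval[1] == 1, in which case the return value is forced to 0 as
 * Linux expects.
 */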
int
linux_fork(struct thread *td, struct linux_fork_args *args)
{
	int error;

#ifdef DEBUG
	if (ldebug(fork))
		printf(ARGS(fork, ""));
#endif

	if ((error = fork(td, (struct fork_args *)args)) != 0)
		return (error);

	if (td->td_retval[1] == 1)
		td->td_retval[0] = 0;
	return (0);
}

int
linux_vfork(struct thread *td, struct linux_vfork_args *args)
{
	int error;

#ifdef DEBUG
	if (ldebug(vfork))
		printf(ARGS(vfork, ""));
#endif

	if ((error = vfork(td, (struct vfork_args *)args)) != 0)
		return (error);
	/* Are we the child? */
	if (td->td_retval[1] == 1)
		td->td_retval[0] = 0;
	return (0);
}

#define CLONE_VM 0x100
#define CLONE_FS 0x200
#define CLONE_FILES 0x400
#define CLONE_SIGHAND 0x800
#define CLONE_PID 0x1000

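/*
 * clone(2): translate the Linux CLONE_* flags into rfork() flags, create
 * the child with fork1(), set its stack pointer and exit signal, and then
 * make it runnable.
 */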
int
linux_clone(struct thread *td, struct linux_clone_args *args)
{
	int error, ff = RFPROC | RFSTOPPED;
	struct proc *p2;
	struct thread *td2;
	int exit_signal;

#ifdef DEBUG
	if (ldebug(clone)) {
		printf(ARGS(clone, "flags %x, stack %x"),
		    (unsigned int)(uintptr_t)args->flags,
		    (unsigned int)(uintptr_t)args->stack);
		if (args->flags & CLONE_PID)
			printf(LMSG("CLONE_PID not yet supported"));
	}
#endif

	if (!args->stack)
		return (EINVAL);

	exit_signal = args->flags & 0x000000ff;
	if (exit_signal >= LINUX_NSIG)
		return (EINVAL);

	if (exit_signal <= LINUX_SIGTBLSZ)
		exit_signal = linux_to_bsd_signal[_SIG_IDX(exit_signal)];

	if (args->flags & CLONE_VM)
		ff |= RFMEM;
	if (args->flags & CLONE_SIGHAND)
		ff |= RFSIGSHARE;
	if (!(args->flags & CLONE_FILES))
		ff |= RFFDG;

	error = fork1(td, ff, 0, &p2);
	if (error)
		return (error);

	PROC_LOCK(p2);
	p2->p_sigparent = exit_signal;
	PROC_UNLOCK(p2);
	td2 = FIRST_THREAD_IN_PROC(p2);
	td2->td_frame->tf_rsp = PTROUT(args->stack);

#ifdef DEBUG
	if (ldebug(clone))
		printf(LMSG("clone: successful rfork to %ld, stack %p sig = %d"),
		    (long)p2->p_pid, args->stack, exit_signal);
#endif

	/*
	 * Make this runnable after we are finished with it.
	 */
	mtx_lock_spin(&sched_lock);
	TD_SET_CAN_RUN(td2);
	setrunqueue(td2, SRQ_BORING);
	mtx_unlock_spin(&sched_lock);

	td->td_retval[0] = p2->p_pid;
	td->td_retval[1] = 0;
	return (0);
}

/* XXX move */
struct l_mmap_argv {
	l_ulong addr;
	l_ulong len;
	l_ulong prot;
	l_ulong flags;
	l_ulong fd;
	l_ulong pgoff;
};

#define STACK_SIZE (2 * 1024 * 1024)
#define GUARD_SIZE (4 * PAGE_SIZE)

static int linux_mmap_common(struct thread *, struct l_mmap_argv *);

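/*
 * mmap2(2) passes the file offset in units of pages, while mmap(2) below
 * passes its arguments indirectly through a pointer and uses a byte
 * offset; both end up in linux_mmap_common().
 */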
int
linux_mmap2(struct thread *td, struct linux_mmap2_args *args)
{
	struct l_mmap_argv linux_args;

#ifdef DEBUG
	if (ldebug(mmap2))
		printf(ARGS(mmap2, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)(intptr_t)args->addr, args->len, args->prot,
		    args->flags, args->fd, args->pgoff);
#endif

	linux_args.addr = PTROUT(args->addr);
	linux_args.len = args->len;
	linux_args.prot = args->prot;
	linux_args.flags = args->flags;
	linux_args.fd = args->fd;
	linux_args.pgoff = args->pgoff;

	return (linux_mmap_common(td, &linux_args));
}

int
linux_mmap(struct thread *td, struct linux_mmap_args *args)
{
	int error;
	struct l_mmap_argv linux_args;

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

#ifdef DEBUG
	if (ldebug(mmap))
		printf(ARGS(mmap, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)(intptr_t)linux_args.addr, linux_args.len,
		    linux_args.prot, linux_args.flags, linux_args.fd,
		    linux_args.pgoff);
#endif
	if ((linux_args.pgoff % PAGE_SIZE) != 0)
		return (EINVAL);
	linux_args.pgoff /= PAGE_SIZE;

	return (linux_mmap_common(td, &linux_args));
}

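/*
 * Common back end for mmap(2) and mmap2(2): translate the Linux mapping
 * flags, give LINUX_MAP_GROWSDOWN regions MAP_STACK treatment, and call
 * the native mmap().
 */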
static int
linux_mmap_common(struct thread *td, struct l_mmap_argv *linux_args)
{
	struct proc *p = td->td_proc;
	struct mmap_args /* {
		caddr_t addr;
		size_t len;
		int prot;
		int flags;
		int fd;
		long pad;
		off_t pos;
	} */ bsd_args;
	int error;

	error = 0;
	bsd_args.flags = 0;
	if (linux_args->flags & LINUX_MAP_SHARED)
		bsd_args.flags |= MAP_SHARED;
	if (linux_args->flags & LINUX_MAP_PRIVATE)
		bsd_args.flags |= MAP_PRIVATE;
	if (linux_args->flags & LINUX_MAP_FIXED)
		bsd_args.flags |= MAP_FIXED;
	if (linux_args->flags & LINUX_MAP_ANON)
		bsd_args.flags |= MAP_ANON;
	else
		bsd_args.flags |= MAP_NOSYNC;
	if (linux_args->flags & LINUX_MAP_GROWSDOWN) {
		bsd_args.flags |= MAP_STACK;

		/* The Linux MAP_GROWSDOWN option does not limit automatic
		 * growth of the region.  Linux mmap with this option takes
		 * as addr the initial BOS (bottom of stack), and as len the
		 * initial region size.  It can then grow down from addr
		 * without limit.  However, Linux threads have an implicit
		 * internal limit on stack size of STACK_SIZE; it is just
		 * not enforced explicitly in Linux.  But here we impose a
		 * limit of (STACK_SIZE - GUARD_SIZE) on the stack region,
		 * since we can do this with our mmap.
		 *
		 * Our mmap with MAP_STACK takes addr as the maximum
		 * downward growth limit on BOS, and as len the maximum
		 * size of the region.  It then maps the top SGROWSIZ bytes,
		 * and autogrows the region down, up to the limit in addr.
		 *
		 * If we don't use the MAP_STACK option, the effect
		 * of this code is to allocate a stack region of a
		 * fixed size of (STACK_SIZE - GUARD_SIZE).
		 */

		/* This gives us TOS */
		bsd_args.addr = (caddr_t)PTRIN(linux_args->addr) +
		    linux_args->len;

		if ((caddr_t)PTRIN(bsd_args.addr) >
		    p->p_vmspace->vm_maxsaddr) {
			/* Some Linux apps will attempt to mmap
			 * thread stacks near the top of their
			 * address space.  If their TOS is greater
			 * than vm_maxsaddr, vm_map_growstack()
			 * will confuse the thread stack with the
			 * process stack and deliver a SEGV if they
			 * attempt to grow the thread stack past their
			 * current stacksize rlimit.  To avoid this,
			 * adjust vm_maxsaddr upwards to reflect
			 * the current stacksize rlimit rather
			 * than the maximum possible stacksize.
			 * It would be better to adjust the
			 * mmap'ed region, but some apps do not check
			 * mmap's return value.
			 */
			PROC_LOCK(p);
			p->p_vmspace->vm_maxsaddr =
			    (char *)LINUX32_USRSTACK -
			    lim_cur(p, RLIMIT_STACK);
			PROC_UNLOCK(p);
		}

		/* This gives us our maximum stack size */
		if (linux_args->len > STACK_SIZE - GUARD_SIZE)
			bsd_args.len = linux_args->len;
		else
			bsd_args.len = STACK_SIZE - GUARD_SIZE;

		/* This gives us a new BOS.  If we're using VM_STACK, then
		 * mmap will just map the top SGROWSIZ bytes, and let
		 * the stack grow down to the limit at BOS.  If we're
		 * not using VM_STACK we map the full stack, since we
		 * don't have a way to autogrow it.
		 */
		bsd_args.addr -= bsd_args.len;
	} else {
		bsd_args.addr = (caddr_t)PTRIN(linux_args->addr);
		bsd_args.len = linux_args->len;
	}
	/*
	 * XXX The i386 Linux emulator always forces PROT_READ on (why?),
	 * so we do the same.  We add PROT_EXEC to work around buggy
	 * applications (e.g. Java) that take advantage of the fact
	 * that execute permissions are not enforced by x86 CPUs.
	 */
	bsd_args.prot = linux_args->prot | PROT_EXEC | PROT_READ;
	if (linux_args->flags & LINUX_MAP_ANON)
		bsd_args.fd = -1;
	else
		bsd_args.fd = linux_args->fd;
	bsd_args.pos = (off_t)linux_args->pgoff * PAGE_SIZE;
	bsd_args.pad = 0;

#ifdef DEBUG
	if (ldebug(mmap))
		printf("-> %s(%p, %d, %d, 0x%08x, %d, 0x%x)\n",
		    __func__,
		    (void *)bsd_args.addr, (int)bsd_args.len, bsd_args.prot,
		    bsd_args.flags, bsd_args.fd, (int)bsd_args.pos);
#endif
	error = mmap(td, &bsd_args);
#ifdef DEBUG
	if (ldebug(mmap))
		printf("-> %s() return: 0x%x (0x%08x)\n",
		    __func__, error, (u_int)td->td_retval[0]);
#endif
	return (error);
}

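/*
 * pipe(2): the native pipe() returns the descriptors in td_retval[0] and
 * td_retval[1]; Linux instead expects them copied out to the supplied
 * array, with the original value of td_retval[1] (%edx) preserved.
 */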
int
linux_pipe(struct thread *td, struct linux_pipe_args *args)
{
	int pip[2];
	int error;
	register_t reg_rdx;

#ifdef DEBUG
	if (ldebug(pipe))
		printf(ARGS(pipe, "*"));
#endif

	reg_rdx = td->td_retval[1];
	error = pipe(td, 0);
	if (error) {
		td->td_retval[1] = reg_rdx;
		return (error);
	}

	pip[0] = td->td_retval[0];
	pip[1] = td->td_retval[1];
	error = copyout(pip, args->pipefds, 2 * sizeof(int));
	if (error) {
		td->td_retval[1] = reg_rdx;
		return (error);
	}

	td->td_retval[1] = reg_rdx;
	td->td_retval[0] = 0;
	return (0);
}

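/*
 * Old-style sigaction(2): convert between the old Linux sigaction layout,
 * which carries only a 32-bit signal mask, and the full l_sigaction_t
 * used by linux_do_sigaction().
 */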
int
linux_sigaction(struct thread *td, struct linux_sigaction_args *args)
{
	l_osigaction_t osa;
	l_sigaction_t act, oact;
	int error;

#ifdef DEBUG
	if (ldebug(sigaction))
		printf(ARGS(sigaction, "%d, %p, %p"),
		    args->sig, (void *)args->nsa, (void *)args->osa);
#endif

	if (args->nsa != NULL) {
		error = copyin(args->nsa, &osa, sizeof(l_osigaction_t));
		if (error)
			return (error);
		act.lsa_handler = osa.lsa_handler;
		act.lsa_flags = osa.lsa_flags;
		act.lsa_restorer = osa.lsa_restorer;
		LINUX_SIGEMPTYSET(act.lsa_mask);
		act.lsa_mask.__bits[0] = osa.lsa_mask;
	}

	error = linux_do_sigaction(td, args->sig, args->nsa ? &act : NULL,
	    args->osa ? &oact : NULL);

	if (args->osa != NULL && !error) {
		osa.lsa_handler = oact.lsa_handler;
		osa.lsa_flags = oact.lsa_flags;
		osa.lsa_restorer = oact.lsa_restorer;
		osa.lsa_mask = oact.lsa_mask.__bits[0];
		error = copyout(&osa, args->osa, sizeof(l_osigaction_t));
	}

	return (error);
}

/*
 * Linux has two extra args, restart and oldmask.  We don't use these,
 * but it seems that "restart" is actually a context pointer that
 * enables the signal to happen with a different register set.
 */
int
linux_sigsuspend(struct thread *td, struct linux_sigsuspend_args *args)
{
	sigset_t sigmask;
	l_sigset_t mask;

#ifdef DEBUG
	if (ldebug(sigsuspend))
		printf(ARGS(sigsuspend, "%08lx"), (unsigned long)args->mask);
#endif

	LINUX_SIGEMPTYSET(mask);
	mask.__bits[0] = args->mask;
	linux_to_bsd_sigset(&mask, &sigmask);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_rt_sigsuspend(struct thread *td, struct linux_rt_sigsuspend_args *uap)
{
	l_sigset_t lmask;
	sigset_t sigmask;
	int error;

#ifdef DEBUG
	if (ldebug(rt_sigsuspend))
		printf(ARGS(rt_sigsuspend, "%p, %d"),
		    (void *)uap->newset, uap->sigsetsize);
#endif

	if (uap->sigsetsize != sizeof(l_sigset_t))
		return (EINVAL);

	error = copyin(uap->newset, &lmask, sizeof(l_sigset_t));
	if (error)
		return (error);

	linux_to_bsd_sigset(&lmask, &sigmask);
	return (kern_sigsuspend(td, sigmask));
}

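/*
 * pause(2): implemented as a sigsuspend() with the thread's current
 * signal mask.
 */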
int
linux_pause(struct thread *td, struct linux_pause_args *args)
{
	struct proc *p = td->td_proc;
	sigset_t sigmask;

#ifdef DEBUG
	if (ldebug(pause))
		printf(ARGS(pause, ""));
#endif

	PROC_LOCK(p);
	sigmask = td->td_sigmask;
	PROC_UNLOCK(p);
	return (kern_sigsuspend(td, sigmask));
}

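/*
 * sigaltstack(2): convert the 32-bit l_stack_t to and from the native
 * stack_t, translating the flags with the helpers at the top of this
 * file.
 */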
int
linux_sigaltstack(struct thread *td, struct linux_sigaltstack_args *uap)
{
	stack_t ss, oss;
	l_stack_t lss;
	int error;

#ifdef DEBUG
	if (ldebug(sigaltstack))
		printf(ARGS(sigaltstack, "%p, %p"), uap->uss, uap->uoss);
#endif

	if (uap->uss != NULL) {
		error = copyin(uap->uss, &lss, sizeof(l_stack_t));
		if (error)
			return (error);

		ss.ss_sp = PTRIN(lss.ss_sp);
		ss.ss_size = lss.ss_size;
		ss.ss_flags = linux_to_bsd_sigaltstack(lss.ss_flags);
	}
	error = kern_sigaltstack(td, (uap->uss != NULL) ? &ss : NULL,
	    (uap->uoss != NULL) ? &oss : NULL);
	if (!error && uap->uoss != NULL) {
		lss.ss_sp = PTROUT(oss.ss_sp);
		lss.ss_size = oss.ss_size;
		lss.ss_flags = bsd_to_linux_sigaltstack(oss.ss_flags);
		error = copyout(&lss, uap->uoss, sizeof(l_stack_t));
	}

	return (error);
}

int
linux_ftruncate64(struct thread *td, struct linux_ftruncate64_args *args)
{
	struct ftruncate_args sa;

#ifdef DEBUG
	if (ldebug(ftruncate64))
		printf(ARGS(ftruncate64, "%u, %jd"), args->fd,
		    (intmax_t)args->length);
#endif

	sa.fd = args->fd;
	sa.pad = 0;
	sa.length = args->length;
	return ftruncate(td, &sa);
}

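/*
 * gettimeofday(2): return the current time, and optionally the timezone,
 * in the 32-bit Linux layouts.
 */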
int
linux_gettimeofday(struct thread *td, struct linux_gettimeofday_args *uap)
{
	struct timeval atv;
	l_timeval atv32;
	struct timezone rtz;
	int error = 0;

	if (uap->tp) {
		microtime(&atv);
		atv32.tv_sec = atv.tv_sec;
		atv32.tv_usec = atv.tv_usec;
		error = copyout(&atv32, uap->tp, sizeof (atv32));
	}
	if (error == 0 && uap->tzp != NULL) {
		rtz.tz_minuteswest = tz_minuteswest;
		rtz.tz_dsttime = tz_dsttime;
		error = copyout(&rtz, uap->tzp, sizeof (rtz));
	}
	return (error);
}

int
linux_nanosleep(struct thread *td, struct linux_nanosleep_args *uap)
{
	struct timespec rqt, rmt;
	struct l_timespec ats32;
	int error;

	error = copyin(uap->rqtp, &ats32, sizeof(ats32));
	if (error != 0)
		return (error);
	rqt.tv_sec = ats32.tv_sec;
	rqt.tv_nsec = ats32.tv_nsec;
	error = kern_nanosleep(td, &rqt, &rmt);
	if (uap->rmtp != NULL) {
		ats32.tv_sec = rmt.tv_sec;
		ats32.tv_nsec = rmt.tv_nsec;
		error = copyout(&ats32, uap->rmtp, sizeof(ats32));
	}
	return (error);
}

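/*
 * getrusage(2): run the native getrusage() against a temporary buffer in
 * the stack gap, then convert the result to the 32-bit l_rusage layout
 * and copy it out to the original user pointer.
 */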
int
linux_getrusage(struct thread *td, struct linux_getrusage_args *uap)
{
	int error;
	caddr_t sg;
	struct l_rusage *p32, s32;
	struct rusage *p = NULL, s;

	p32 = uap->rusage;
	if (p32 != NULL) {
		sg = stackgap_init();
		p = stackgap_alloc(&sg, sizeof(struct rusage));
		uap->rusage = (struct l_rusage *)p;
	}
	error = getrusage(td, (struct getrusage_args *) uap);
	if (error != 0)
		return (error);
	if (p32 != NULL) {
		error = copyin(p, &s, sizeof(s));
		if (error != 0)
			return (error);
		s32.ru_utime.tv_sec = s.ru_utime.tv_sec;
		s32.ru_utime.tv_usec = s.ru_utime.tv_usec;
		s32.ru_stime.tv_sec = s.ru_stime.tv_sec;
		s32.ru_stime.tv_usec = s.ru_stime.tv_usec;
		s32.ru_maxrss = s.ru_maxrss;
		s32.ru_ixrss = s.ru_ixrss;
		s32.ru_idrss = s.ru_idrss;
		s32.ru_isrss = s.ru_isrss;
		s32.ru_minflt = s.ru_minflt;
		s32.ru_majflt = s.ru_majflt;
		s32.ru_nswap = s.ru_nswap;
		s32.ru_inblock = s.ru_inblock;
		s32.ru_oublock = s.ru_oublock;
		s32.ru_msgsnd = s.ru_msgsnd;
		s32.ru_msgrcv = s.ru_msgrcv;
		s32.ru_nsignals = s.ru_nsignals;
		s32.ru_nvcsw = s.ru_nvcsw;
		s32.ru_nivcsw = s.ru_nivcsw;
		error = copyout(&s32, p32, sizeof(s32));
	}
	return (error);
}

int
linux_sched_rr_get_interval(struct thread *td,
    struct linux_sched_rr_get_interval_args *uap)
{
	struct timespec ts;
	struct l_timespec ts32;
	int error;

	error = kern_sched_rr_get_interval(td, uap->pid, &ts);
	if (error != 0)
		return (error);
	ts32.tv_sec = ts.tv_sec;
	ts32.tv_nsec = ts.tv_nsec;
	return (copyout(&ts32, uap->interval, sizeof(ts32)));
}

int
linux_mprotect(struct thread *td, struct linux_mprotect_args *uap)
{
	struct mprotect_args bsd_args;

	bsd_args.addr = uap->addr;
	bsd_args.len = uap->len;
	bsd_args.prot = uap->prot;
	/* XXX PROT_READ implies PROT_EXEC; see linux_mmap_common(). */
	if ((bsd_args.prot & PROT_READ) != 0)
		bsd_args.prot |= PROT_EXEC;
	return (mprotect(td, &bsd_args));
}