1 /*-
2 * Copyright (c) 2000 Marcel Moolenaar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer
10 * in this position and unchanged.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * $FreeBSD: src/sys/i386/linux/linux_machdep.c,v 1.6.2.4 2001/11/05 19:08:23 marcel Exp $
29 */
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/imgact.h>
34 #include <sys/kern_syscall.h>
35 #include <sys/lock.h>
36 #include <sys/mman.h>
37 #include <sys/nlookup.h>
38 #include <sys/proc.h>
39 #include <sys/priv.h>
40 #include <sys/resource.h>
41 #include <sys/resourcevar.h>
42 #include <sys/ptrace.h>
43 #include <sys/sysproto.h>
44 #include <sys/thread2.h>
45 #include <sys/unistd.h>
46 #include <sys/wait.h>
47
48 #include <machine/frame.h>
49 #include <machine/psl.h>
50 #include <machine/segments.h>
51 #include <machine/sysarch.h>
52
53 #include <vm/vm.h>
54 #include <vm/pmap.h>
55 #include <vm/vm_map.h>
56
57 #include <sys/mplock2.h>
58
59 #include "linux.h"
60 #include "linux_proto.h"
61 #include "../linux_ipc.h"
62 #include "../linux_signal.h"
63 #include "../linux_util.h"
64 #include "../linux_emuldata.h"
65
/*
 * Linux LDT entry as passed to modify_ldt(2); matches the i386 Linux
 * struct user_desc bit-field layout.
 */
struct l_descriptor {
	l_uint		entry_number;		/* LDT slot index */
	l_ulong		base_addr;		/* segment base address */
	l_uint		limit;			/* segment limit */
	l_uint		seg_32bit:1;		/* 32-bit (vs 16-bit) segment */
	l_uint		contents:2;		/* data / expand-down / code */
	l_uint		read_exec_only:1;	/* writes disallowed */
	l_uint		limit_in_pages:1;	/* limit in 4K pages, not bytes */
	l_uint		seg_not_present:1;	/* inverted present bit */
	l_uint		useable:1;		/* AVL bit */
};
77
/*
 * Argument block for old_select(2): the pre-1.3 Linux select entry
 * point passes a single user pointer to this structure instead of
 * five separate syscall arguments.
 */
struct l_old_select_argv {
	l_int		nfds;
	l_fd_set	*readfds;
	l_fd_set	*writefds;
	l_fd_set	*exceptfds;
	struct l_timeval *timeout;
};
85
86 int
87 linux_to_bsd_sigaltstack(int lsa)
88 {
89 int bsa = 0;
90
91 if (lsa & LINUX_SS_DISABLE)
92 bsa |= SS_DISABLE;
93 if (lsa & LINUX_SS_ONSTACK)
94 bsa |= SS_ONSTACK;
95 return (bsa);
96 }
97
98 int
99 bsd_to_linux_sigaltstack(int bsa)
100 {
101 int lsa = 0;
102
103 if (bsa & SS_DISABLE)
104 lsa |= LINUX_SS_DISABLE;
105 if (bsa & SS_ONSTACK)
106 lsa |= LINUX_SS_ONSTACK;
107 return (lsa);
108 }
109
110 /*
111 * MPALMOSTSAFE
112 */
int
sys_linux_execve(struct linux_execve_args *args)
{
	struct nlookupdata nd;
	struct image_args exec_args;
	char *path;
	int error;

	/* Copy the user path in, translating it through the emulation root. */
	error = linux_copyin_path(args->path, &path, LINUX_PATH_EXISTS);
	if (error)
		return (error);
#ifdef DEBUG
	if (ldebug(execve))
		kprintf(ARGS(execve, "%s"), path);
#endif
	get_mplock();
	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	/* exec_args must be zeroed so exec_free_args() below is always safe. */
	bzero(&exec_args, sizeof(exec_args));
	if (error == 0) {
		error = exec_copyin_args(&exec_args, path, PATH_SYSSPACE,
					args->argp, args->envp);
	}
	if (error == 0)
		error = kern_execve(&nd, &exec_args);
	nlookup_done(&nd);

	/*
	 * The syscall result is returned in registers to the new program.
	 * Linux will register %edx as an atexit function and we must be
	 * sure to set it to 0.  XXX
	 */
	if (error == 0) {
		args->sysmsg_result64 = 0;
		/* Re-create emulation data for the freshly exec'd image. */
		if (curproc->p_sysent == &elf_linux_sysvec)
			error = emuldata_init(curproc, NULL, 0);
	}

	exec_free_args(&exec_args);
	linux_free_path(&path);

	if (error < 0) {
		/* We hit a lethal error condition.  Let's die now. */
		exit1(W_EXITCODE(0, SIGABRT));
		/* NOTREACHED */
	}
	rel_mplock();

	return(error);
}
162
/*
 * Old-style msgrcv() passes msgp/msgtyp indirectly via this user-space
 * structure when the version field of ipc(2)'s 'what' argument is 0.
 */
struct l_ipc_kludge {
	struct l_msgbuf *msgp;
	l_long msgtyp;
};
167
168 /*
169 * MPALMOSTSAFE
170 */
171 int
172 sys_linux_ipc(struct linux_ipc_args *args)
173 {
174 int error = 0;
175
176 get_mplock();
177
178 switch (args->what & 0xFFFF) {
179 case LINUX_SEMOP: {
180 struct linux_semop_args a;
181
182 a.semid = args->arg1;
183 a.tsops = args->ptr;
184 a.nsops = args->arg2;
185 a.sysmsg_lresult = 0;
186 error = linux_semop(&a);
187 args->sysmsg_lresult = a.sysmsg_lresult;
188 break;
189 }
190 case LINUX_SEMGET: {
191 struct linux_semget_args a;
192
193 a.key = args->arg1;
194 a.nsems = args->arg2;
195 a.semflg = args->arg3;
196 a.sysmsg_lresult = 0;
197 error = linux_semget(&a);
198 args->sysmsg_lresult = a.sysmsg_lresult;
199 break;
200 }
201 case LINUX_SEMCTL: {
202 struct linux_semctl_args a;
203 int error;
204
205 a.semid = args->arg1;
206 a.semnum = args->arg2;
207 a.cmd = args->arg3;
208 a.sysmsg_lresult = 0;
209 error = copyin((caddr_t)args->ptr, &a.arg, sizeof(a.arg));
210 if (error)
211 break;
212 error = linux_semctl(&a);
213 args->sysmsg_lresult = a.sysmsg_lresult;
214 break;
215 }
216 case LINUX_MSGSND: {
217 struct linux_msgsnd_args a;
218
219 a.msqid = args->arg1;
220 a.msgp = args->ptr;
221 a.msgsz = args->arg2;
222 a.msgflg = args->arg3;
223 a.sysmsg_lresult = 0;
224 error = linux_msgsnd(&a);
225 args->sysmsg_lresult = a.sysmsg_lresult;
226 break;
227 }
228 case LINUX_MSGRCV: {
229 struct linux_msgrcv_args a;
230
231 a.msqid = args->arg1;
232 a.msgsz = args->arg2;
233 if (a.msgsz < 0) {
234 error = EINVAL;
235 break;
236 }
237 a.msgflg = args->arg3;
238 a.sysmsg_lresult = 0;
239 if ((args->what >> 16) == 0) {
240 struct l_ipc_kludge tmp;
241 int error;
242
243 if (args->ptr == NULL) {
244 error = EINVAL;
245 break;
246 }
247 error = copyin((caddr_t)args->ptr, &tmp, sizeof(tmp));
248 if (error)
249 break;
250 a.msgp = tmp.msgp;
251 a.msgtyp = tmp.msgtyp;
252 } else {
253 a.msgp = args->ptr;
254 a.msgtyp = args->arg5;
255 }
256 error = linux_msgrcv(&a);
257 args->sysmsg_lresult = a.sysmsg_lresult;
258 break;
259 }
260 case LINUX_MSGGET: {
261 struct linux_msgget_args a;
262
263 a.key = args->arg1;
264 a.msgflg = args->arg2;
265 a.sysmsg_lresult = 0;
266 error = linux_msgget(&a);
267 args->sysmsg_lresult = a.sysmsg_lresult;
268 break;
269 }
270 case LINUX_MSGCTL: {
271 struct linux_msgctl_args a;
272
273 a.msqid = args->arg1;
274 a.cmd = args->arg2;
275 a.buf = args->ptr;
276 a.sysmsg_lresult = 0;
277 error = linux_msgctl(&a);
278 args->sysmsg_lresult = a.sysmsg_lresult;
279 break;
280 }
281 case LINUX_SHMAT: {
282 struct linux_shmat_args a;
283
284 a.shmid = args->arg1;
285 a.shmaddr = args->ptr;
286 a.shmflg = args->arg2;
287 a.raddr = (l_ulong *)args->arg3;
288 a.sysmsg_lresult = 0;
289 error = linux_shmat(&a);
290 args->sysmsg_lresult = a.sysmsg_lresult;
291 break;
292 }
293 case LINUX_SHMDT: {
294 struct linux_shmdt_args a;
295
296 a.shmaddr = args->ptr;
297 a.sysmsg_lresult = 0;
298 error = linux_shmdt(&a);
299 args->sysmsg_lresult = a.sysmsg_lresult;
300 break;
301 }
302 case LINUX_SHMGET: {
303 struct linux_shmget_args a;
304
305 a.key = args->arg1;
306 a.size = args->arg2;
307 a.shmflg = args->arg3;
308 a.sysmsg_lresult = 0;
309 error = linux_shmget(&a);
310 args->sysmsg_lresult = a.sysmsg_lresult;
311 break;
312 }
313 case LINUX_SHMCTL: {
314 struct linux_shmctl_args a;
315
316 a.shmid = args->arg1;
317 a.cmd = args->arg2;
318 a.buf = args->ptr;
319 a.sysmsg_lresult = 0;
320 error = linux_shmctl(&a);
321 args->sysmsg_lresult = a.sysmsg_lresult;
322 break;
323 }
324 default:
325 error = EINVAL;
326 break;
327 }
328 rel_mplock();
329 return(error);
330 }
331
332 /*
333 * MPSAFE
334 */
335 int
336 sys_linux_old_select(struct linux_old_select_args *args)
337 {
338 struct l_old_select_argv linux_args;
339 struct linux_select_args newsel;
340 int error;
341
342 #ifdef DEBUG
343 if (ldebug(old_select))
344 kprintf(ARGS(old_select, "%p"), args->ptr);
345 #endif
346
347 error = copyin((caddr_t)args->ptr, &linux_args, sizeof(linux_args));
348 if (error)
349 return (error);
350
351 newsel.sysmsg_iresult = 0;
352 newsel.nfds = linux_args.nfds;
353 newsel.readfds = linux_args.readfds;
354 newsel.writefds = linux_args.writefds;
355 newsel.exceptfds = linux_args.exceptfds;
356 newsel.timeout = linux_args.timeout;
357 error = sys_linux_select(&newsel);
358 args->sysmsg_iresult = newsel.sysmsg_iresult;
359 return(error);
360 }
361
362 /*
363 * MPSAFE
364 */
int
sys_linux_fork(struct linux_fork_args *args)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	get_mplock();
	error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2);
	if (error == 0) {
		/* Set up Linux emulation data before the child runs. */
		emuldata_init(curproc, p2, 0);

		start_forked_proc(lp, p2);
		/* Parent returns the child's pid; %edx (fds[1]) cleared. */
		args->sysmsg_fds[0] = p2->p_pid;
		args->sysmsg_fds[1] = 0;
	}
	rel_mplock();

	/* Are we the child?  The child must see a return value of 0. */
	if (args->sysmsg_iresult == 1)
		args->sysmsg_iresult = 0;

	return (error);
}
389
390 /*
391 * MPALMOSTSAFE
392 */
int
sys_linux_exit_group(struct linux_exit_group_args *args)
{
	struct linux_emuldata *em, *e;
	struct proc *p;
	int rval;

	rval = args->rval;
	EMUL_LOCK();

	em = emuldata_get(curproc);

	/* Sole member of the thread group: just exit normally. */
	if (em->s->refs == 1) {
		EMUL_UNLOCK();
		exit1(W_EXITCODE(rval, 0));
		/* NOTREACHED */
		return (0);
	}
	KKASSERT(em->proc == curproc);
	/* Mark ourselves killed and record the group exit status. */
	em->flags |= EMUL_DIDKILL;
	em->s->flags |= LINUX_LES_INEXITGROUP;
	em->s->xstat = W_EXITCODE(rval, 0);

	/* Move ourselves to the list head so we can walk from here. */
	LIST_REMOVE(em, threads);
	LIST_INSERT_HEAD(&em->s->threads, em, threads);

	/*
	 * Walk the group by repeatedly hopping 'em' over the next entry,
	 * SIGKILLing each sibling that has not already been killed.
	 */
	while ((e = LIST_NEXT(em, threads)) != NULL) {
		LIST_REMOVE(em, threads);
		LIST_INSERT_AFTER(e, em, threads);
		if ((e->flags & EMUL_DIDKILL) == 0) {
			e->flags |= EMUL_DIDKILL;
			p = e->proc;
			/* Hold the proc across ksignal so it can't go away. */
			PHOLD(p);
			ksignal(p, SIGKILL);
			PRELE(p);
		}
	}

	EMUL_UNLOCK();
	exit1(W_EXITCODE(rval, 0));
	/* NOTREACHED */

	return (0);
}
437
438 /*
439 * MPSAFE
440 */
int
sys_linux_vfork(struct linux_vfork_args *args)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	get_mplock();
	/* RFPPWAIT|RFMEM: share address space, parent waits (vfork). */
	error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2);
	if (error == 0) {
		emuldata_init(curproc, p2, 0);

		start_forked_proc(lp, p2);
		args->sysmsg_fds[0] = p2->p_pid;
		args->sysmsg_fds[1] = 0;
	}
	rel_mplock();

	/* The child must see a return value of 0. */
	if (args->sysmsg_iresult == 1)
		args->sysmsg_iresult = 0;

	return (error);
}
464
465 /*
466 * MPALMOSTSAFE
467 */
int
sys_linux_clone(struct linux_clone_args *args)
{
	struct segment_descriptor *desc;
	struct l_user_desc info;
	int idx;
	int a[2];

	struct lwp *lp = curthread->td_lwp;
	int error, ff = RFPROC;
	struct proc *p2 = NULL;
	int exit_signal;

	/* Low byte of flags is the signal sent to the parent on exit. */
	exit_signal = args->flags & 0x000000ff;
	if (exit_signal >= LINUX_NSIG)
		return (EINVAL);
	if (exit_signal <= LINUX_SIGTBLSZ)
		exit_signal = linux_to_bsd_signal[_SIG_IDX(exit_signal)];

	/* Translate Linux clone flags into fork1() RF* flags. */
	if (args->flags & LINUX_CLONE_VM)
		ff |= RFMEM;
	if (args->flags & LINUX_CLONE_SIGHAND)
		ff |= RFSIGSHARE;
	if (!(args->flags & (LINUX_CLONE_FILES | LINUX_CLONE_FS)))
		ff |= RFFDG;
	if ((args->flags & 0xffffff00) == LINUX_THREADING_FLAGS)
		ff |= RFTHREAD;
	if (args->flags & LINUX_CLONE_VFORK)
		ff |= RFPPWAIT;
	if (args->flags & LINUX_CLONE_PARENT_SETTID) {
		if (args->parent_tidptr == NULL)
			return (EINVAL);
	}

	error = 0;

	get_mplock();
	error = fork1(lp, ff | RFPGLOCK, &p2);
	if (error) {
		rel_mplock();
		return error;
	}

	args->sysmsg_fds[0] = p2 ? p2->p_pid : 0;
	args->sysmsg_fds[1] = 0;

	/*
	 * CLONE_PARENT/CLONE_THREAD: the new task is a sibling of the
	 * caller, not a child, so reparent it to our parent.
	 */
	if (args->flags & (LINUX_CLONE_PARENT | LINUX_CLONE_THREAD)) {
		lwkt_gettoken(&curproc->p_token);
		while (p2->p_pptr != curproc->p_pptr)
			proc_reparent(p2, curproc->p_pptr);
		lwkt_reltoken(&curproc->p_token);
	}

	emuldata_init(curproc, p2, args->flags);
	linux_proc_fork(p2, curproc, args->child_tidptr);
	/*
	 * XXX: this can't happen, p2 is never NULL, or else we'd have
	 * other problems, too (see p2->p_sigparent == ...,
	 * linux_proc_fork and emuldata_init.
	 */
	if (p2 == NULL) {
		error = ESRCH;
	} else {
		if (args->flags & LINUX_CLONE_PARENT_SETTID) {
			/*
			 * NOTE(review): a failure here can be overwritten
			 * by the later SETTLS copyin below — confirm whether
			 * that is intended.
			 */
			error = copyout(&p2->p_pid, args->parent_tidptr, sizeof(p2->p_pid));
		}
	}

	p2->p_sigparent = exit_signal;
	/* Non-NULL stack: point the child's %esp at the supplied stack. */
	if (args->stack) {
		ONLY_LWP_IN_PROC(p2)->lwp_md.md_regs->tf_esp =
		    (unsigned long)args->stack;
	}

	if (args->flags & LINUX_CLONE_SETTLS) {
		/* The user_desc pointer is passed in the caller's %esi. */
		error = copyin((void *)curthread->td_lwp->lwp_md.md_regs->tf_esi, &info, sizeof(struct l_user_desc));
		if (error) {
			kprintf("copyin of tf_esi to info failed\n");
		} else {
			idx = info.entry_number;
			/*
			 * We understand both our own entries such as the ones
			 * we provide on linux_set_thread_area, as well as the
			 * linux-type entries 6-8.
			 */
			if ((idx < 6 || idx > 8) && (idx < GTLS_START)) {
				kprintf("LINUX_CLONE_SETTLS, invalid idx requested: %d\n", idx);
				goto out;
			}
			/* Translate a Linux GDT index into our TLS index. */
			if (idx < GTLS_START) {
				idx -= 6;
			} else {
#if 0 /* was SMP */
				idx -= (GTLS_START + mycpu->gd_cpuid * NGDT);
#endif
				idx -= GTLS_START;
			}
			KKASSERT(idx >= 0);

			a[0] = LINUX_LDT_entry_a(&info);
			a[1] = LINUX_LDT_entry_b(&info);
			if (p2) {
				/* Install the TLS descriptor in the child. */
				desc = &FIRST_LWP_IN_PROC(p2)->lwp_thread->td_tls.tls[idx];
				memcpy(desc, &a, sizeof(a));
			} else {
				kprintf("linux_clone... we don't have a p2\n");
			}
		}
	}
out:
	if (p2)
		start_forked_proc(lp, p2);

	rel_mplock();
#ifdef DEBUG
	/* NOTE(review): this kprintf dereferences p2 without a NULL check. */
	if (ldebug(clone))
		kprintf(LMSG("clone: successful rfork to %ld"),
		    (long)p2->p_pid);
#endif

	return (error);
}
590
591 /* XXX move */
/* Argument block for old-style mmap(2), passed by user-space pointer. */
struct l_mmap_argv {
	l_caddr_t	addr;	/* requested mapping address */
	l_int		len;	/* length of mapping */
	l_int		prot;	/* PROT_* protection bits */
	l_int		flags;	/* LINUX_MAP_* flags */
	l_int		fd;	/* file descriptor, or ignored for MAP_ANON */
	l_int		pos;	/* file offset in bytes */
};
600
601 #define STACK_SIZE (2 * 1024 * 1024)
602 #define GUARD_SIZE (4 * PAGE_SIZE)
603
604 /*
605 * MPALMOSTSAFE
606 */
/*
 * Common backend for mmap(2) and mmap2(2): translate Linux mapping
 * flags into BSD ones, apply the MAP_GROWSDOWN thread-stack emulation,
 * and call kern_mmap().  On success *res receives the mapped address.
 */
static int
linux_mmap_common(caddr_t linux_addr, size_t linux_len, int linux_prot,
    int linux_flags, int linux_fd, off_t pos, void **res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	caddr_t addr;
	void *new;
	int error, flags, len, prot, fd;

	flags = 0;
	if (linux_flags & LINUX_MAP_SHARED)
		flags |= MAP_SHARED;
	if (linux_flags & LINUX_MAP_PRIVATE)
		flags |= MAP_PRIVATE;
	if (linux_flags & LINUX_MAP_FIXED)
		flags |= MAP_FIXED;
	if (linux_flags & LINUX_MAP_ANON) {
		flags |= MAP_ANON;
	} else {
		flags |= MAP_NOSYNC;
	}

	lwkt_gettoken(&curproc->p_vmspace->vm_map.token);

	if (linux_flags & LINUX_MAP_GROWSDOWN) {
		flags |= MAP_STACK;
		/* The linux MAP_GROWSDOWN option does not limit auto
		 * growth of the region. Linux mmap with this option
		 * takes as addr the inital BOS, and as len, the initial
		 * region size. It can then grow down from addr without
		 * limit. However, linux threads has an implicit internal
		 * limit to stack size of STACK_SIZE. Its just not
		 * enforced explicitly in linux. But, here we impose
		 * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
		 * region, since we can do this with our mmap.
		 *
		 * Our mmap with MAP_STACK takes addr as the maximum
		 * downsize limit on BOS, and as len the max size of
		 * the region. It them maps the top SGROWSIZ bytes,
		 * and autgrows the region down, up to the limit
		 * in addr.
		 *
		 * If we don't use the MAP_STACK option, the effect
		 * of this code is to allocate a stack region of a
		 * fixed size of (STACK_SIZE - GUARD_SIZE).
		 */

		/* This gives us TOS */
		addr = linux_addr + linux_len;

		if (addr > p->p_vmspace->vm_maxsaddr) {
			/* Some linux apps will attempt to mmap
			 * thread stacks near the top of their
			 * address space. If their TOS is greater
			 * than vm_maxsaddr, vm_map_growstack()
			 * will confuse the thread stack with the
			 * process stack and deliver a SEGV if they
			 * attempt to grow the thread stack past their
			 * current stacksize rlimit. To avoid this,
			 * adjust vm_maxsaddr upwards to reflect
			 * the current stacksize rlimit rather
			 * than the maximum possible stacksize.
			 * It would be better to adjust the
			 * mmap'ed region, but some apps do not check
			 * mmap's return value.
			 */
			p->p_vmspace->vm_maxsaddr = (char *)USRSTACK -
			    p->p_rlimit[RLIMIT_STACK].rlim_cur;
		}

		/* This gives us our maximum stack size */
		if (linux_len > STACK_SIZE - GUARD_SIZE) {
			len = linux_len;
		} else {
			len = STACK_SIZE - GUARD_SIZE;
		}
		/* This gives us a new BOS. If we're using VM_STACK, then
		 * mmap will just map the top SGROWSIZ bytes, and let
		 * the stack grow down to the limit at BOS. If we're
		 * not using VM_STACK we map the full stack, since we
		 * don't have a way to autogrow it.
		 */
		addr -= len;
	} else {
		addr = linux_addr;
		len = linux_len;
	}

	prot = linux_prot;

	/* Linux on i386: any access implies read and exec permission. */
	if (prot & (PROT_READ | PROT_WRITE | PROT_EXEC))
		prot |= PROT_READ | PROT_EXEC;

	if (linux_flags & LINUX_MAP_ANON) {
		fd = -1;
	} else {
		fd = linux_fd;
	}

#ifdef DEBUG
	if (ldebug(mmap) || ldebug(mmap2))
		kprintf("-> (%p, %d, %d, 0x%08x, %d, %lld)\n",
		    addr, len, prot, flags, fd, pos);
#endif
	error = kern_mmap(curproc->p_vmspace, addr, len,
			  prot, flags, fd, pos, &new);

	lwkt_reltoken(&curproc->p_vmspace->vm_map.token);

	if (error == 0)
		*res = new;
	return (error);
}
721
722 /*
723 * MPSAFE
724 */
/*
 * Old-style mmap(2): arguments arrive packed in a user-space
 * struct l_mmap_argv; offset is in bytes.
 */
int
sys_linux_mmap(struct linux_mmap_args *args)
{
	struct l_mmap_argv linux_args;
	int error;

	error = copyin((caddr_t)args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

#ifdef DEBUG
	if (ldebug(mmap))
		kprintf(ARGS(mmap, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)linux_args.addr, linux_args.len, linux_args.prot,
		    linux_args.flags, linux_args.fd, linux_args.pos);
#endif
	error = linux_mmap_common(linux_args.addr, linux_args.len,
	    linux_args.prot, linux_args.flags, linux_args.fd,
	    linux_args.pos, &args->sysmsg_resultp);
#ifdef DEBUG
	if (ldebug(mmap))
		kprintf("-> %p\n", args->sysmsg_resultp);
#endif
	return(error);
}
750
751 /*
752 * MPSAFE
753 */
/*
 * mmap2(2): arguments arrive directly in registers; the offset is
 * expressed in pages rather than bytes.
 */
int
sys_linux_mmap2(struct linux_mmap2_args *args)
{
	int error;

#ifdef DEBUG
	if (ldebug(mmap2))
		kprintf(ARGS(mmap2, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)args->addr, args->len, args->prot, args->flags,
		    args->fd, args->pgoff);
#endif
	/* Convert the page offset into a byte offset for the common code. */
	error = linux_mmap_common((void *)args->addr, args->len, args->prot,
	    args->flags, args->fd, args->pgoff * PAGE_SIZE,
	    &args->sysmsg_resultp);
#ifdef DEBUG
	if (ldebug(mmap2))
		kprintf("-> %p\n", args->sysmsg_resultp);
#endif
	return (error);
}
774
775 /*
776 * MPSAFE
777 */
int
sys_linux_pipe(struct linux_pipe_args *args)
{
	int error;
	int reg_edx;
	struct pipe_args bsd_args;

#ifdef DEBUG
	if (ldebug(pipe))
		kprintf(ARGS(pipe, "*"));
#endif

	/*
	 * Linux pipe(2) returns the descriptors via the user pointer, not
	 * in registers, so preserve the caller's %edx (sysmsg_fds[1])
	 * across the call and restore it on every exit path.
	 */
	reg_edx = args->sysmsg_fds[1];
	error = sys_pipe(&bsd_args);
	if (error) {
		args->sysmsg_fds[1] = reg_edx;
		return (error);
	}

	/* Copy the two new descriptors out to the user's array. */
	error = copyout(bsd_args.sysmsg_fds, args->pipefds, 2*sizeof(int));
	if (error) {
		args->sysmsg_fds[1] = reg_edx;
		return (error);
	}

	args->sysmsg_fds[1] = reg_edx;
	args->sysmsg_fds[0] = 0;
	return (0);
}
807
808 /*
809 * XXX: Preliminary
810 */
int
sys_linux_pipe2(struct linux_pipe2_args *args)
{
	struct thread *td = curthread;
	int error;
	int reg_edx;
	struct pipe_args bsd_args;
	union fcntl_dat dat;

	/* Preserve %edx across the call, as in sys_linux_pipe(). */
	reg_edx = args->sysmsg_fds[1];
	error = sys_pipe(&bsd_args);
	if (error) {
		args->sysmsg_fds[1] = reg_edx;
		return (error);
	}

	/* XXX LINUX_O_CLOEXEC is not implemented yet. */
	// if (args->flags & LINUX_O_CLOEXEC) {
	// }

	if (args->flags & LINUX_O_NONBLOCK) {
		/*
		 * Best effort: kern_fcntl() return values are deliberately
		 * ignored here (the pipe itself was created successfully).
		 */
		dat.fc_flags = O_NONBLOCK;
		kern_fcntl(bsd_args.sysmsg_fds[0], F_SETFL, &dat, td->td_ucred);
		kern_fcntl(bsd_args.sysmsg_fds[1], F_SETFL, &dat, td->td_ucred);
	}

	error = copyout(bsd_args.sysmsg_fds, args->pipefds, 2*sizeof(int));
	if (error) {
		args->sysmsg_fds[1] = reg_edx;
		return (error);
	}

	args->sysmsg_fds[1] = reg_edx;
	args->sysmsg_fds[0] = 0;
	return (0);
}
846
847 /*
848 * MPSAFE
849 */
/*
 * ioperm(2): forward the I/O-port permission request to the native
 * sysarch(I386_SET_IOPERM) via a stack-gap argument structure.
 */
int
sys_linux_ioperm(struct linux_ioperm_args *args)
{
	struct sysarch_args sa;
	struct i386_ioperm_args *iia;
	caddr_t sg;
	int error;

	sg = stackgap_init();
	iia = stackgap_alloc(&sg, sizeof(struct i386_ioperm_args));
	iia->start = args->start;
	iia->length = args->length;
	iia->enable = args->enable;
	sa.sysmsg_resultp = NULL;
	sa.op = I386_SET_IOPERM;
	sa.parms = (char *)iia;
	error = sys_sysarch(&sa);
	args->sysmsg_resultp = sa.sysmsg_resultp;
	return(error);
}
870
871 /*
872 * MPSAFE
873 */
/*
 * iopl(2): set the I/O privilege level (0-3) in the caller's eflags.
 * Requires root and a non-positive securelevel.
 */
int
sys_linux_iopl(struct linux_iopl_args *args)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	int error;

	if (args->level < 0 || args->level > 3)
		return (EINVAL);
	if ((error = priv_check(td, PRIV_ROOT)) != 0)
		return (error);
	if (securelevel > 0)
		return (EPERM);
	/* Replace the PSL_IOPL field with the requested level. */
	lp->lwp_md.md_regs->tf_eflags =
	    (lp->lwp_md.md_regs->tf_eflags & ~PSL_IOPL) |
	    (args->level * (PSL_IOPL / 3));
	return (0);
}
892
893 /*
894 * MPSAFE
895 */
896 int
897 sys_linux_modify_ldt(struct linux_modify_ldt_args *uap)
898 {
899 int error;
900 caddr_t sg;
901 struct sysarch_args args;
902 struct i386_ldt_args *ldt;
903 struct l_descriptor ld;
904 union descriptor *desc;
905 int size, written;
906
907 sg = stackgap_init();
908
909 if (uap->ptr == NULL)
910 return (EINVAL);
911
912 switch (uap->func) {
913 case 0x00: /* read_ldt */
914 ldt = stackgap_alloc(&sg, sizeof(*ldt));
915 ldt->start = 0;
916 ldt->descs = uap->ptr;
917 ldt->num = uap->bytecount / sizeof(union descriptor);
918 args.op = I386_GET_LDT;
919 args.parms = (char*)ldt;
920 args.sysmsg_iresult = 0;
921 error = sys_sysarch(&args);
922 uap->sysmsg_iresult = args.sysmsg_iresult *
923 sizeof(union descriptor);
924 break;
925 case 0x02: /* read_default_ldt = 0 */
926 size = 5*sizeof(struct l_desc_struct);
927 if (size > uap->bytecount)
928 size = uap->bytecount;
929 for (written = error = 0; written < size && error == 0; written++)
930 error = subyte((char *)uap->ptr + written, 0);
931 uap->sysmsg_iresult = written;
932 break;
933 case 0x01: /* write_ldt */
934 case 0x11: /* write_ldt */
935 if (uap->bytecount != sizeof(ld))
936 return (EINVAL);
937
938 error = copyin(uap->ptr, &ld, sizeof(ld));
939 if (error)
940 return (error);
941
942 ldt = stackgap_alloc(&sg, sizeof(*ldt));
943 desc = stackgap_alloc(&sg, sizeof(*desc));
944 ldt->start = ld.entry_number;
945 ldt->descs = desc;
946 ldt->num = 1;
947 desc->sd.sd_lolimit = (ld.limit & 0x0000ffff);
948 desc->sd.sd_hilimit = (ld.limit & 0x000f0000) >> 16;
949 desc->sd.sd_lobase = (ld.base_addr & 0x00ffffff);
950 desc->sd.sd_hibase = (ld.base_addr & 0xff000000) >> 24;
951 desc->sd.sd_type = SDT_MEMRO | ((ld.read_exec_only ^ 1) << 1) |
952 (ld.contents << 2);
953 desc->sd.sd_dpl = 3;
954 desc->sd.sd_p = (ld.seg_not_present ^ 1);
955 desc->sd.sd_xx = 0;
956 desc->sd.sd_def32 = ld.seg_32bit;
957 desc->sd.sd_gran = ld.limit_in_pages;
958 args.op = I386_SET_LDT;
959 args.parms = (char*)ldt;
960 args.sysmsg_iresult = 0;
961 error = sys_sysarch(&args);
962 uap->sysmsg_iresult = args.sysmsg_iresult;
963 break;
964 default:
965 error = EINVAL;
966 break;
967 }
968
969 return (error);
970 }
971
972 /*
973 * MPALMOSTSAFE
974 */
/*
 * Old-style sigaction(2): converts between the old Linux l_osigaction_t
 * (single-word mask) and the native struct sigaction.
 */
int
sys_linux_sigaction(struct linux_sigaction_args *args)
{
	l_osigaction_t osa;
	l_sigaction_t linux_act, linux_oact;
	struct sigaction act, oact;
	int error, sig;

#ifdef DEBUG
	if (ldebug(sigaction))
		kprintf(ARGS(sigaction, "%d, %p, %p"),
		    args->sig, (void *)args->nsa, (void *)args->osa);
#endif

	if (args->nsa) {
		/* Widen the old single-word action into an l_sigaction_t. */
		error = copyin(args->nsa, &osa, sizeof(l_osigaction_t));
		if (error)
			return (error);
		linux_act.lsa_handler = osa.lsa_handler;
		linux_act.lsa_flags = osa.lsa_flags;
		linux_act.lsa_restorer = osa.lsa_restorer;
		LINUX_SIGEMPTYSET(linux_act.lsa_mask);
		linux_act.lsa_mask.__bits[0] = osa.lsa_mask;
		linux_to_bsd_sigaction(&linux_act, &act);
	}

	/* Translate the signal number through the Linux->BSD table. */
	if (args->sig <= LINUX_SIGTBLSZ)
		sig = linux_to_bsd_signal[_SIG_IDX(args->sig)];
	else
		sig = args->sig;

	get_mplock();
	error = kern_sigaction(sig, args->nsa ? &act : NULL,
			       args->osa ? &oact : NULL);
	rel_mplock();

	if (args->osa != NULL && !error) {
		/* Narrow the previous action back into the old format. */
		bsd_to_linux_sigaction(&oact, &linux_oact);
		osa.lsa_handler = linux_oact.lsa_handler;
		osa.lsa_flags = linux_oact.lsa_flags;
		osa.lsa_restorer = linux_oact.lsa_restorer;
		osa.lsa_mask = linux_oact.lsa_mask.__bits[0];
		error = copyout(&osa, args->osa, sizeof(l_osigaction_t));
	}
	return (error);
}
1021
1022 /*
1023 * Linux has two extra args, restart and oldmask. We dont use these,
1024 * but it seems that "restart" is actually a context pointer that
1025 * enables the signal to happen with a different register set.
1026 *
1027 * MPALMOSTSAFE
1028 */
1029 int
1030 sys_linux_sigsuspend(struct linux_sigsuspend_args *args)
1031 {
1032 l_sigset_t linux_mask;
1033 sigset_t mask;
1034 int error;
1035
1036 #ifdef DEBUG
1037 if (ldebug(sigsuspend))
1038 kprintf(ARGS(sigsuspend, "%08lx"), (unsigned long)args->mask);
1039 #endif
1040
1041 LINUX_SIGEMPTYSET(mask);
1042 mask.__bits[0] = args->mask;
1043 linux_to_bsd_sigset(&linux_mask, &mask);
1044
1045 get_mplock();
1046 error = kern_sigsuspend(&mask);
1047 rel_mplock();
1048
1049 return(error);
1050 }
1051
1052 /*
1053 * MPALMOSTSAFE
1054 */
/*
 * rt_sigsuspend(2): copy in a full Linux sigset, convert it to a BSD
 * sigset and suspend until a signal is delivered.
 */
int
sys_linux_rt_sigsuspend(struct linux_rt_sigsuspend_args *uap)
{
	l_sigset_t linux_mask;
	sigset_t mask;
	int error;

#ifdef DEBUG
	if (ldebug(rt_sigsuspend))
		kprintf(ARGS(rt_sigsuspend, "%p, %d"),
		    (void *)uap->newset, uap->sigsetsize);
#endif

	/* Reject callers compiled against a different sigset size. */
	if (uap->sigsetsize != sizeof(l_sigset_t))
		return (EINVAL);

	error = copyin(uap->newset, &linux_mask, sizeof(l_sigset_t));
	if (error)
		return (error);

	linux_to_bsd_sigset(&linux_mask, &mask);

	get_mplock();
	error = kern_sigsuspend(&mask);
	rel_mplock();

	return(error);
}
1083
1084 /*
1085 * MPALMOSTSAFE
1086 */
/*
 * pause(2): suspend with the current signal mask until any signal
 * is delivered.
 */
int
sys_linux_pause(struct linux_pause_args *args)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	sigset_t mask;
	int error;

#ifdef DEBUG
	if (ldebug(pause))
		kprintf(ARGS(pause, ""));
#endif

	/* Suspend with the mask unchanged. */
	mask = lp->lwp_sigmask;

	get_mplock();
	error = kern_sigsuspend(&mask);
	rel_mplock();

	return(error);
}
1108
1109 /*
1110 * MPALMOSTSAFE
1111 */
/*
 * sigaltstack(2): convert the Linux stack_t to/from the native one,
 * translating the SS_* flag bits in both directions.
 */
int
sys_linux_sigaltstack(struct linux_sigaltstack_args *uap)
{
	stack_t ss, oss;
	l_stack_t linux_ss;
	int error;

#ifdef DEBUG
	if (ldebug(sigaltstack))
		kprintf(ARGS(sigaltstack, "%p, %p"), uap->uss, uap->uoss);
#endif

	if (uap->uss) {
		error = copyin(uap->uss, &linux_ss, sizeof(l_stack_t));
		if (error)
			return (error);

		ss.ss_sp = linux_ss.ss_sp;
		ss.ss_size = linux_ss.ss_size;
		ss.ss_flags = linux_to_bsd_sigaltstack(linux_ss.ss_flags);
	}

	get_mplock();
	error = kern_sigaltstack(uap->uss ? &ss : NULL,
				 uap->uoss ? &oss : NULL);
	rel_mplock();

	if (error == 0 && uap->uoss) {
		/* Hand the previous stack back in Linux format. */
		linux_ss.ss_sp = oss.ss_sp;
		linux_ss.ss_size = oss.ss_size;
		linux_ss.ss_flags = bsd_to_linux_sigaltstack(oss.ss_flags);
		error = copyout(&linux_ss, uap->uoss, sizeof(l_stack_t));
	}

	return (error);
}
1148
1149 int
1150 sys_linux_set_thread_area(struct linux_set_thread_area_args *args)
1151 {
1152 struct segment_descriptor *desc;
1153 struct l_user_desc info;
1154 int error;
1155 int idx;
1156 int a[2];
1157 int i;
1158
1159 error = copyin(args->desc, &info, sizeof(struct l_user_desc));
1160 if (error)
1161 return (EFAULT);
1162
1163 #ifdef DEBUG
1164 if (ldebug(set_thread_area))
1165 kprintf(ARGS(set_thread_area, "%i, %x, %x, %i, %i, %i, %i, %i, %i\n"),
1166 info.entry_number,
1167 info.base_addr,
1168 info.limit,
1169 info.seg_32bit,
1170 info.contents,
1171 info.read_exec_only,
1172 info.limit_in_pages,
1173 info.seg_not_present,
1174 info.useable);
1175 #endif
1176
1177 idx = info.entry_number;
1178 if (idx != -1 && (idx < 6 || idx > 8))
1179 return (EINVAL);
1180
1181 if (idx == -1) {
1182 /* -1 means finding the first free TLS entry */
1183 for (i = 0; i < NGTLS; i++) {
1184 /*
1185 * try to determine if the TLS entry is empty by looking
1186 * at the lolimit entry.
1187 */
1188 if (curthread->td_tls.tls[idx].sd_lolimit == 0) {
1189 idx = i;
1190 break;
1191 }
1192 }
1193
1194 if (idx == -1) {
1195 /*
1196 * By now we should have an index. If not, it means
1197 * that no entry is free, so return ESRCH.
1198 */
1199 return (ESRCH);
1200 }
1201 } else {
1202 /* translate the index from Linux to ours */
1203 idx -= 6;
1204 KKASSERT(idx >= 0);
1205 }
1206
1207 /* Tell the caller about the allocated entry number */
1208 #if 0 /* was SMP */
1209 info.entry_number = GTLS_START + mycpu->gd_cpuid * NGDT + idx;
1210 #endif
1211 info.entry_number = GTLS_START + idx;
1212
1213
1214 error = copyout(&info, args->desc, sizeof(struct l_user_desc));
1215 if (error)
1216 return (error);
1217
1218 if (LINUX_LDT_empty(&info)) {
1219 a[0] = 0;
1220 a[1] = 0;
1221 } else {
1222 a[0] = LINUX_LDT_entry_a(&info);
1223 a[1] = LINUX_LDT_entry_b(&info);
1224 }
1225
1226 /*
1227 * Update the TLS and the TLS entries in the GDT, but hold a critical
1228 * section as required by set_user_TLS().
1229 */
1230 crit_enter();
1231 desc = &curthread->td_tls.tls[idx];
1232 memcpy(desc, &a, sizeof(a));
1233 set_user_TLS();
1234 crit_exit();
1235
1236 return (0);
1237 }
1238
/*
 * get_thread_area(2): read back a TLS descriptor for the requested
 * entry number (Linux entries 6-8 or our GTLS range) and return it in
 * Linux user_desc format.
 */
int
sys_linux_get_thread_area(struct linux_get_thread_area_args *args)
{
	struct segment_descriptor *sd;
	struct l_desc_struct desc;
	struct l_user_desc info;
	int error;
	int idx;

#ifdef DEBUG
	if (ldebug(get_thread_area))
		kprintf(ARGS(get_thread_area, "%p"), args->desc);
#endif

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (EFAULT);

	idx = info.entry_number;
	/* Accept either the Linux range (6-8) or our own GTLS entries. */
	if ((idx < 6 || idx > 8) && (idx < GTLS_START)) {
		kprintf("sys_linux_get_thread_area, invalid idx requested: %d\n", idx);
		return (EINVAL);
	}

	memset(&info, 0, sizeof(info));

	/* translate the index from Linux to ours */
	info.entry_number = idx;
	if (idx < GTLS_START) {
		idx -= 6;
	} else {
#if 0 /* was SMP */
		idx -= (GTLS_START + mycpu->gd_cpuid * NGDT);
#endif
		idx -= GTLS_START;

	}
	KKASSERT(idx >= 0);

	/* Decode the raw descriptor into the Linux user_desc fields. */
	sd = &curthread->td_tls.tls[idx];
	memcpy(&desc, sd, sizeof(desc));
	info.base_addr = LINUX_GET_BASE(&desc);
	info.limit = LINUX_GET_LIMIT(&desc);
	info.seg_32bit = LINUX_GET_32BIT(&desc);
	info.contents = LINUX_GET_CONTENTS(&desc);
	info.read_exec_only = !LINUX_GET_WRITABLE(&desc);
	info.limit_in_pages = LINUX_GET_LIMIT_PAGES(&desc);
	info.seg_not_present = !LINUX_GET_PRESENT(&desc);
	info.useable = LINUX_GET_USEABLE(&desc);

	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (EFAULT);

	return (0);
}
Cache object: 777ce309b1512c0910f32baad5383170
|