/*-
 * Copyright (c) 2000 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.2/sys/i386/linux/linux_machdep.c 248532 2013-03-19 20:18:30Z jkim $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capability.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/unistd.h>
#include <sys/wait.h>
#include <sys/sched.h>

#include <machine/frame.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/sysarch.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <i386/linux/linux.h>
#include <i386/linux/linux_proto.h>
#include <compat/linux/linux_ipc.h>
#include <compat/linux/linux_misc.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_util.h>
#include <compat/linux/linux_emul.h>

#include <i386/include/pcb.h>	/* needed for pcb definition in linux_set_thread_area */

#include "opt_posix.h"

extern struct sysentvec elf32_freebsd_sysvec;	/* defined in i386/i386/elf_machdep.c */

struct l_descriptor {
	l_uint		entry_number;
	l_ulong		base_addr;
	l_uint		limit;
	l_uint		seg_32bit:1;
	l_uint		contents:2;
	l_uint		read_exec_only:1;
	l_uint		limit_in_pages:1;
	l_uint		seg_not_present:1;
	l_uint		useable:1;
};

struct l_old_select_argv {
	l_int		nfds;
	l_fd_set	*readfds;
	l_fd_set	*writefds;
	l_fd_set	*exceptfds;
	struct l_timeval *timeout;
};

static int	linux_mmap_common(struct thread *td, l_uintptr_t addr,
		    l_size_t len, l_int prot, l_int flags, l_int fd,
		    l_loff_t pos);

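/*
 * Translate sigaltstack(2) flag bits between the Linux and FreeBSD
 * representations; the two helpers below mirror each other.
 */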
int
linux_to_bsd_sigaltstack(int lsa)
{
	int bsa = 0;

	if (lsa & LINUX_SS_DISABLE)
		bsa |= SS_DISABLE;
	if (lsa & LINUX_SS_ONSTACK)
		bsa |= SS_ONSTACK;
	return (bsa);
}

int
bsd_to_linux_sigaltstack(int bsa)
{
	int lsa = 0;

	if (bsa & SS_DISABLE)
		lsa |= LINUX_SS_DISABLE;
	if (bsa & SS_ONSTACK)
		lsa |= LINUX_SS_ONSTACK;
	return (lsa);
}

int
linux_execve(struct thread *td, struct linux_execve_args *args)
{
	int error;
	char *newpath;
	struct image_args eargs;

	LCONVPATHEXIST(td, args->path, &newpath);

#ifdef DEBUG
	if (ldebug(execve))
		printf(ARGS(execve, "%s"), newpath);
#endif

	error = exec_copyin_args(&eargs, newpath, UIO_SYSSPACE,
	    args->argp, args->envp);
	free(newpath, M_TEMP);
	if (error == 0)
		error = kern_execve(td, &eargs, NULL);
	if (error == 0)
		/*
		 * A Linux process can exec a FreeBSD one; don't attempt
		 * to create emuldata for such a process with
		 * linux_proc_init, as that trips a KASSERT panic
		 * because the process has p->p_emuldata == NULL.
		 */
		if (SV_PROC_ABI(td->td_proc) == SV_ABI_LINUX)
			error = linux_proc_init(td, 0, 0);
	return (error);
}

struct l_ipc_kludge {
	struct l_msgbuf *msgp;
	l_long msgtyp;
};

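/*
 * Linux funnels all System V IPC operations through a single ipc(2)
 * syscall; the low 16 bits of "what" select the operation, which is
 * dispatched to the matching compat handler below.
 */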
int
linux_ipc(struct thread *td, struct linux_ipc_args *args)
{

	switch (args->what & 0xFFFF) {
	case LINUX_SEMOP: {
		struct linux_semop_args a;

		a.semid = args->arg1;
		a.tsops = args->ptr;
		a.nsops = args->arg2;
		return (linux_semop(td, &a));
	}
	case LINUX_SEMGET: {
		struct linux_semget_args a;

		a.key = args->arg1;
		a.nsems = args->arg2;
		a.semflg = args->arg3;
		return (linux_semget(td, &a));
	}
	case LINUX_SEMCTL: {
		struct linux_semctl_args a;
		int error;

		a.semid = args->arg1;
		a.semnum = args->arg2;
		a.cmd = args->arg3;
		error = copyin(args->ptr, &a.arg, sizeof(a.arg));
		if (error)
			return (error);
		return (linux_semctl(td, &a));
	}
	case LINUX_MSGSND: {
		struct linux_msgsnd_args a;

		a.msqid = args->arg1;
		a.msgp = args->ptr;
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		return (linux_msgsnd(td, &a));
	}
	case LINUX_MSGRCV: {
		struct linux_msgrcv_args a;

		a.msqid = args->arg1;
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		if ((args->what >> 16) == 0) {
			struct l_ipc_kludge tmp;
			int error;

			if (args->ptr == NULL)
				return (EINVAL);
			error = copyin(args->ptr, &tmp, sizeof(tmp));
			if (error)
				return (error);
			a.msgp = tmp.msgp;
			a.msgtyp = tmp.msgtyp;
		} else {
			a.msgp = args->ptr;
			a.msgtyp = args->arg5;
		}
		return (linux_msgrcv(td, &a));
	}
	case LINUX_MSGGET: {
		struct linux_msgget_args a;

		a.key = args->arg1;
		a.msgflg = args->arg2;
		return (linux_msgget(td, &a));
	}
	case LINUX_MSGCTL: {
		struct linux_msgctl_args a;

		a.msqid = args->arg1;
		a.cmd = args->arg2;
		a.buf = args->ptr;
		return (linux_msgctl(td, &a));
	}
	case LINUX_SHMAT: {
		struct linux_shmat_args a;

		a.shmid = args->arg1;
		a.shmaddr = args->ptr;
		a.shmflg = args->arg2;
		a.raddr = (l_ulong *)args->arg3;
		return (linux_shmat(td, &a));
	}
	case LINUX_SHMDT: {
		struct linux_shmdt_args a;

		a.shmaddr = args->ptr;
		return (linux_shmdt(td, &a));
	}
	case LINUX_SHMGET: {
		struct linux_shmget_args a;

		a.key = args->arg1;
		a.size = args->arg2;
		a.shmflg = args->arg3;
		return (linux_shmget(td, &a));
	}
	case LINUX_SHMCTL: {
		struct linux_shmctl_args a;

		a.shmid = args->arg1;
		a.cmd = args->arg2;
		a.buf = args->ptr;
		return (linux_shmctl(td, &a));
	}
	default:
		break;
	}

	return (EINVAL);
}

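/*
 * The old Linux select(2) passes a pointer to a structure holding all
 * five arguments; unpack it and hand off to linux_select().
 */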
int
linux_old_select(struct thread *td, struct linux_old_select_args *args)
{
	struct l_old_select_argv linux_args;
	struct linux_select_args newsel;
	int error;

#ifdef DEBUG
	if (ldebug(old_select))
		printf(ARGS(old_select, "%p"), args->ptr);
#endif

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

	newsel.nfds = linux_args.nfds;
	newsel.readfds = linux_args.readfds;
	newsel.writefds = linux_args.writefds;
	newsel.exceptfds = linux_args.exceptfds;
	newsel.timeout = linux_args.timeout;
	return (linux_select(td, &newsel));
}

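/*
 * Install the TLS segment requested by clone(2) with CLONE_SETTLS as
 * the thread's %gs descriptor.
 */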
int
linux_set_cloned_tls(struct thread *td, void *desc)
{
	struct segment_descriptor sd;
	struct l_user_desc info;
	int idx, error;
	int a[2];

	error = copyin(desc, &info, sizeof(struct l_user_desc));
	if (error) {
		printf(LMSG("copyin failed!"));
	} else {
		idx = info.entry_number;

		/*
		 * It looks like we're getting the index we returned
		 * in the set_thread_area() syscall.
		 */
		if (idx != 6 && idx != 3) {
			printf(LMSG("resetting idx!"));
			idx = 3;
		}

		/* This doesn't happen in practice. */
		if (idx == 6) {
			/* We might copy out the entry_number as 3. */
			info.entry_number = 3;
			error = copyout(&info, desc, sizeof(struct l_user_desc));
			if (error)
				printf(LMSG("copyout failed!"));
		}

		a[0] = LINUX_LDT_entry_a(&info);
		a[1] = LINUX_LDT_entry_b(&info);

		memcpy(&sd, &a, sizeof(a));
#ifdef DEBUG
		if (ldebug(clone))
			printf("Segment created in clone with "
			    "CLONE_SETTLS: lobase: %x, hibase: %x, "
			    "lolimit: %x, hilimit: %x, type: %i, "
			    "dpl: %i, p: %i, xx: %i, def32: %i, "
			    "gran: %i\n", sd.sd_lobase, sd.sd_hibase,
			    sd.sd_lolimit, sd.sd_hilimit, sd.sd_type,
			    sd.sd_dpl, sd.sd_p, sd.sd_xx,
			    sd.sd_def32, sd.sd_gran);
#endif

		/* set %gs */
		td->td_pcb->pcb_gsd = sd;
		td->td_pcb->pcb_gs = GSEL(GUGS_SEL, SEL_UPL);
	}

	return (error);
}

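/*
 * Set the user stack pointer in the trap frame for a newly created
 * (cloned) thread.
 */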
int
linux_set_upcall_kse(struct thread *td, register_t stack)
{

	td->td_frame->tf_esp = stack;

	return (0);
}

#define STACK_SIZE  (2 * 1024 * 1024)
#define GUARD_SIZE  (4 * PAGE_SIZE)

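/*
 * mmap2(2) takes its file offset in pages; scale it to a byte offset
 * before calling the common mmap handler.
 */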
int
linux_mmap2(struct thread *td, struct linux_mmap2_args *args)
{

#ifdef DEBUG
	if (ldebug(mmap2))
		printf(ARGS(mmap2, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)args->addr, args->len, args->prot,
		    args->flags, args->fd, args->pgoff);
#endif

	return (linux_mmap_common(td, args->addr, args->len, args->prot,
	    args->flags, args->fd, (uint64_t)(uint32_t)args->pgoff *
	    PAGE_SIZE));
}

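/*
 * The old mmap(2) passes its arguments in a user-space structure and
 * supplies the file offset in bytes rather than pages.
 */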
int
linux_mmap(struct thread *td, struct linux_mmap_args *args)
{
	int error;
	struct l_mmap_argv linux_args;

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

#ifdef DEBUG
	if (ldebug(mmap))
		printf(ARGS(mmap, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)linux_args.addr, linux_args.len, linux_args.prot,
		    linux_args.flags, linux_args.fd, linux_args.pgoff);
#endif

	return (linux_mmap_common(td, linux_args.addr, linux_args.len,
	    linux_args.prot, linux_args.flags, linux_args.fd,
	    (uint32_t)linux_args.pgoff));
}

static int
linux_mmap_common(struct thread *td, l_uintptr_t addr, l_size_t len, l_int prot,
    l_int flags, l_int fd, l_loff_t pos)
{
	struct proc *p = td->td_proc;
	struct mmap_args /* {
		caddr_t addr;
		size_t len;
		int prot;
		int flags;
		int fd;
		long pad;
		off_t pos;
	} */ bsd_args;
	int error;
	struct file *fp;

	error = 0;
	bsd_args.flags = 0;
	fp = NULL;

	/*
	 * Linux mmap(2):
	 * You must specify exactly one of MAP_SHARED and MAP_PRIVATE
	 */
	if (!((flags & LINUX_MAP_SHARED) ^ (flags & LINUX_MAP_PRIVATE)))
		return (EINVAL);

	if (flags & LINUX_MAP_SHARED)
		bsd_args.flags |= MAP_SHARED;
	if (flags & LINUX_MAP_PRIVATE)
		bsd_args.flags |= MAP_PRIVATE;
	if (flags & LINUX_MAP_FIXED)
		bsd_args.flags |= MAP_FIXED;
	if (flags & LINUX_MAP_ANON) {
		/* Enforce pos to be on page boundary, then ignore. */
		if ((pos & PAGE_MASK) != 0)
			return (EINVAL);
		pos = 0;
		bsd_args.flags |= MAP_ANON;
	} else
		bsd_args.flags |= MAP_NOSYNC;
	if (flags & LINUX_MAP_GROWSDOWN)
		bsd_args.flags |= MAP_STACK;

	/*
	 * PROT_READ, PROT_WRITE, or PROT_EXEC implies PROT_READ and PROT_EXEC
	 * on Linux/i386. We do this to ensure maximum compatibility.
	 * Linux/ia64 does the same in i386 emulation mode.
	 */
	bsd_args.prot = prot;
	if (bsd_args.prot & (PROT_READ | PROT_WRITE | PROT_EXEC))
		bsd_args.prot |= PROT_READ | PROT_EXEC;

	/* Linux does not check file descriptor when MAP_ANONYMOUS is set. */
	bsd_args.fd = (bsd_args.flags & MAP_ANON) ? -1 : fd;
	if (bsd_args.fd != -1) {
		/*
		 * Linux follows Solaris mmap(2) description:
		 * The file descriptor fildes is opened with
		 * read permission, regardless of the
		 * protection options specified.
		 *
		 * Checking just CAP_MMAP is fine here, since the real work
		 * is done in the FreeBSD mmap().
		 */

		if ((error = fget(td, bsd_args.fd, CAP_MMAP, &fp)) != 0)
			return (error);
		if (fp->f_type != DTYPE_VNODE) {
			fdrop(fp, td);
			return (EINVAL);
		}

		/* Linux mmap() just fails for O_WRONLY files */
		if (!(fp->f_flag & FREAD)) {
			fdrop(fp, td);
			return (EACCES);
		}

		fdrop(fp, td);
	}

	if (flags & LINUX_MAP_GROWSDOWN) {
		/*
		 * The Linux MAP_GROWSDOWN option does not limit auto
		 * growth of the region.  Linux mmap with this option
		 * takes as addr the initial BOS, and as len, the initial
		 * region size.  It can then grow down from addr without
		 * limit.  However, Linux threads have an implicit internal
		 * limit to stack size of STACK_SIZE.  It's just not
		 * enforced explicitly in Linux.  But, here we impose
		 * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
		 * region, since we can do this with our mmap.
		 *
		 * Our mmap with MAP_STACK takes addr as the maximum
		 * downsize limit on BOS, and as len the max size of
		 * the region.  It then maps the top SGROWSIZ bytes,
		 * and auto grows the region down, up to the limit
		 * in addr.
		 *
		 * If we don't use the MAP_STACK option, the effect
		 * of this code is to allocate a stack region of a
		 * fixed size of (STACK_SIZE - GUARD_SIZE).
		 */

		if ((caddr_t)PTRIN(addr) + len > p->p_vmspace->vm_maxsaddr) {
			/*
			 * Some linux apps will attempt to mmap
			 * thread stacks near the top of their
			 * address space. If their TOS is greater
			 * than vm_maxsaddr, vm_map_growstack()
			 * will confuse the thread stack with the
			 * process stack and deliver a SEGV if they
			 * attempt to grow the thread stack past their
			 * current stacksize rlimit. To avoid this,
			 * adjust vm_maxsaddr upwards to reflect
			 * the current stacksize rlimit rather
			 * than the maximum possible stacksize.
			 * It would be better to adjust the
			 * mmap'ed region, but some apps do not check
			 * mmap's return value.
			 */
			PROC_LOCK(p);
			p->p_vmspace->vm_maxsaddr = (char *)USRSTACK -
			    lim_cur(p, RLIMIT_STACK);
			PROC_UNLOCK(p);
		}

		/*
		 * This gives us our maximum stack size and a new BOS.
		 * If we're using VM_STACK, then mmap will just map
		 * the top SGROWSIZ bytes, and let the stack grow down
		 * to the limit at BOS. If we're not using VM_STACK
		 * we map the full stack, since we don't have a way
		 * to autogrow it.
		 */
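		/*
		 * For example, with the 4 KB i386 page size this gives
		 * GUARD_SIZE = 16 KB; a 128 KB request at addr becomes a
		 * (2 MB - 16 KB) MAP_STACK mapping whose top stays at
		 * addr + 128 KB.
		 */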
		if (len > STACK_SIZE - GUARD_SIZE) {
			bsd_args.addr = (caddr_t)PTRIN(addr);
			bsd_args.len = len;
		} else {
			bsd_args.addr = (caddr_t)PTRIN(addr) -
			    (STACK_SIZE - GUARD_SIZE - len);
			bsd_args.len = STACK_SIZE - GUARD_SIZE;
		}
	} else {
		bsd_args.addr = (caddr_t)PTRIN(addr);
		bsd_args.len = len;
	}
	bsd_args.pos = pos;

#ifdef DEBUG
	if (ldebug(mmap))
		printf("-> %s(%p, %d, %d, 0x%08x, %d, 0x%x)\n",
		    __func__,
		    (void *)bsd_args.addr, bsd_args.len, bsd_args.prot,
		    bsd_args.flags, bsd_args.fd, (int)bsd_args.pos);
#endif
	error = sys_mmap(td, &bsd_args);
#ifdef DEBUG
	if (ldebug(mmap))
		printf("-> %s() return: 0x%x (0x%08x)\n",
		    __func__, error, (u_int)td->td_retval[0]);
#endif
	return (error);
}

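/*
 * Apply the same implied PROT_READ/PROT_EXEC promotion as mmap above
 * before calling the native mprotect().
 */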
int
linux_mprotect(struct thread *td, struct linux_mprotect_args *uap)
{
	struct mprotect_args bsd_args;

	bsd_args.addr = uap->addr;
	bsd_args.len = uap->len;
	bsd_args.prot = uap->prot;
	if (bsd_args.prot & (PROT_READ | PROT_WRITE | PROT_EXEC))
		bsd_args.prot |= PROT_READ | PROT_EXEC;
	return (sys_mprotect(td, &bsd_args));
}

int
linux_ioperm(struct thread *td, struct linux_ioperm_args *args)
{
	int error;
	struct i386_ioperm_args iia;

	iia.start = args->start;
	iia.length = args->length;
	iia.enable = args->enable;
	error = i386_set_ioperm(td, &iia);
	return (error);
}

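/*
 * iopl(2) sets the I/O privilege level in EFLAGS; require PRIV_IO and
 * a securelevel of 0 or less before granting it.
 */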
int
linux_iopl(struct thread *td, struct linux_iopl_args *args)
{
	int error;

	if (args->level < 0 || args->level > 3)
		return (EINVAL);
	if ((error = priv_check(td, PRIV_IO)) != 0)
		return (error);
	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
		return (error);
	td->td_frame->tf_eflags = (td->td_frame->tf_eflags & ~PSL_IOPL) |
	    (args->level * (PSL_IOPL / 3));
	return (0);
}

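/*
 * modify_ldt(2) multiplexes LDT reads and writes on "func"; writes are
 * translated from the Linux descriptor layout into a FreeBSD segment
 * descriptor and installed with i386_set_ldt().
 */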
int
linux_modify_ldt(struct thread *td, struct linux_modify_ldt_args *uap)
{
	int error;
	struct i386_ldt_args ldt;
	struct l_descriptor ld;
	union descriptor desc;
	int size, written;

	switch (uap->func) {
	case 0x00: /* read_ldt */
		ldt.start = 0;
		ldt.descs = uap->ptr;
		ldt.num = uap->bytecount / sizeof(union descriptor);
		error = i386_get_ldt(td, &ldt);
		td->td_retval[0] *= sizeof(union descriptor);
		break;
	case 0x02: /* read_default_ldt = 0 */
		size = 5 * sizeof(struct l_desc_struct);
		if (size > uap->bytecount)
			size = uap->bytecount;
		for (written = error = 0; written < size && error == 0; written++)
			error = subyte((char *)uap->ptr + written, 0);
		td->td_retval[0] = written;
		break;
	case 0x01: /* write_ldt */
	case 0x11: /* write_ldt */
		if (uap->bytecount != sizeof(ld))
			return (EINVAL);

		error = copyin(uap->ptr, &ld, sizeof(ld));
		if (error)
			return (error);

		ldt.start = ld.entry_number;
		ldt.descs = &desc;
		ldt.num = 1;
		desc.sd.sd_lolimit = (ld.limit & 0x0000ffff);
		desc.sd.sd_hilimit = (ld.limit & 0x000f0000) >> 16;
		desc.sd.sd_lobase = (ld.base_addr & 0x00ffffff);
		desc.sd.sd_hibase = (ld.base_addr & 0xff000000) >> 24;
		desc.sd.sd_type = SDT_MEMRO | ((ld.read_exec_only ^ 1) << 1) |
		    (ld.contents << 2);
		desc.sd.sd_dpl = 3;
		desc.sd.sd_p = (ld.seg_not_present ^ 1);
		desc.sd.sd_xx = 0;
		desc.sd.sd_def32 = ld.seg_32bit;
		desc.sd.sd_gran = ld.limit_in_pages;
		error = i386_set_ldt(td, &ldt, &desc);
		break;
	default:
		error = ENOSYS;
		break;
	}

	if (error == EOPNOTSUPP) {
		printf("linux: modify_ldt needs kernel option USER_LDT\n");
		error = ENOSYS;
	}

	return (error);
}

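/*
 * The old sigaction(2) uses a single-word signal mask; convert to and
 * from the rt_sigaction layout and share linux_do_sigaction().
 */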
int
linux_sigaction(struct thread *td, struct linux_sigaction_args *args)
{
	l_osigaction_t osa;
	l_sigaction_t act, oact;
	int error;

#ifdef DEBUG
	if (ldebug(sigaction))
		printf(ARGS(sigaction, "%d, %p, %p"),
		    args->sig, (void *)args->nsa, (void *)args->osa);
#endif

	if (args->nsa != NULL) {
		error = copyin(args->nsa, &osa, sizeof(l_osigaction_t));
		if (error)
			return (error);
		act.lsa_handler = osa.lsa_handler;
		act.lsa_flags = osa.lsa_flags;
		act.lsa_restorer = osa.lsa_restorer;
		LINUX_SIGEMPTYSET(act.lsa_mask);
		act.lsa_mask.__bits[0] = osa.lsa_mask;
	}

	error = linux_do_sigaction(td, args->sig, args->nsa ? &act : NULL,
	    args->osa ? &oact : NULL);

	if (args->osa != NULL && !error) {
		osa.lsa_handler = oact.lsa_handler;
		osa.lsa_flags = oact.lsa_flags;
		osa.lsa_restorer = oact.lsa_restorer;
		osa.lsa_mask = oact.lsa_mask.__bits[0];
		error = copyout(&osa, args->osa, sizeof(l_osigaction_t));
	}

	return (error);
}

/*
 * Linux has two extra args, restart and oldmask.  We don't use these,
 * but it seems that "restart" is actually a context pointer that
 * enables the signal to happen with a different register set.
 */
int
linux_sigsuspend(struct thread *td, struct linux_sigsuspend_args *args)
{
	sigset_t sigmask;
	l_sigset_t mask;

#ifdef DEBUG
	if (ldebug(sigsuspend))
		printf(ARGS(sigsuspend, "%08lx"), (unsigned long)args->mask);
#endif

	LINUX_SIGEMPTYSET(mask);
	mask.__bits[0] = args->mask;
	linux_to_bsd_sigset(&mask, &sigmask);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_rt_sigsuspend(struct thread *td, struct linux_rt_sigsuspend_args *uap)
{
	l_sigset_t lmask;
	sigset_t sigmask;
	int error;

#ifdef DEBUG
	if (ldebug(rt_sigsuspend))
		printf(ARGS(rt_sigsuspend, "%p, %d"),
		    (void *)uap->newset, uap->sigsetsize);
#endif

	if (uap->sigsetsize != sizeof(l_sigset_t))
		return (EINVAL);

	error = copyin(uap->newset, &lmask, sizeof(l_sigset_t));
	if (error)
		return (error);

	linux_to_bsd_sigset(&lmask, &sigmask);
	return (kern_sigsuspend(td, sigmask));
}

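/*
 * pause(2): suspend the thread with its current signal mask until a
 * signal is delivered.
 */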
int
linux_pause(struct thread *td, struct linux_pause_args *args)
{
	struct proc *p = td->td_proc;
	sigset_t sigmask;

#ifdef DEBUG
	if (ldebug(pause))
		printf(ARGS(pause, ""));
#endif

	PROC_LOCK(p);
	sigmask = td->td_sigmask;
	PROC_UNLOCK(p);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_sigaltstack(struct thread *td, struct linux_sigaltstack_args *uap)
{
	stack_t ss, oss;
	l_stack_t lss;
	int error;

#ifdef DEBUG
	if (ldebug(sigaltstack))
		printf(ARGS(sigaltstack, "%p, %p"), uap->uss, uap->uoss);
#endif

	if (uap->uss != NULL) {
		error = copyin(uap->uss, &lss, sizeof(l_stack_t));
		if (error)
			return (error);

		ss.ss_sp = lss.ss_sp;
		ss.ss_size = lss.ss_size;
		ss.ss_flags = linux_to_bsd_sigaltstack(lss.ss_flags);
	}
	error = kern_sigaltstack(td, (uap->uss != NULL) ? &ss : NULL,
	    (uap->uoss != NULL) ? &oss : NULL);
	if (!error && uap->uoss != NULL) {
		lss.ss_sp = oss.ss_sp;
		lss.ss_size = oss.ss_size;
		lss.ss_flags = bsd_to_linux_sigaltstack(oss.ss_flags);
		error = copyout(&lss, uap->uoss, sizeof(l_stack_t));
	}

	return (error);
}

int
linux_ftruncate64(struct thread *td, struct linux_ftruncate64_args *args)
{
	struct ftruncate_args sa;

#ifdef DEBUG
	if (ldebug(ftruncate64))
		printf(ARGS(ftruncate64, "%u, %jd"), args->fd,
		    (intmax_t)args->length);
#endif

	sa.fd = args->fd;
	sa.length = args->length;
	return sys_ftruncate(td, &sa);
}

int
linux_set_thread_area(struct thread *td, struct linux_set_thread_area_args *args)
{
	struct l_user_desc info;
	int error;
	int idx;
	int a[2];
	struct segment_descriptor sd;

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (error);

#ifdef DEBUG
	if (ldebug(set_thread_area))
		printf(ARGS(set_thread_area, "%i, %x, %x, %i, %i, %i, %i, %i, %i\n"),
		    info.entry_number,
		    info.base_addr,
		    info.limit,
		    info.seg_32bit,
		    info.contents,
		    info.read_exec_only,
		    info.limit_in_pages,
		    info.seg_not_present,
		    info.useable);
#endif

	idx = info.entry_number;
	/*
	 * Semantics of the Linux version: every thread in the system has
	 * an array of 3 TLS descriptors.  The 1st is GLIBC TLS, the 2nd
	 * is WINE, the 3rd unknown.  This syscall loads one of the
	 * selected TLS descriptors with a value and also loads GDT
	 * descriptors 6, 7 and 8 with the content of the per-thread
	 * descriptors.
	 *
	 * Semantics of the FreeBSD version: I think we can ignore that
	 * Linux has 3 per-thread descriptors and use just the 1st one.
	 * The tls_array[] is used only in the set/get_thread_area()
	 * syscalls and for loading the GDT descriptors.  In FreeBSD we
	 * use just one GDT descriptor for TLS, so we will load just one.
	 *
	 * XXX: this doesn't work when a user space process tries to use
	 * more than one TLS segment.  A comment in the Linux sources says
	 * wine might do this.
	 */

	/*
	 * We support just GLIBC TLS now.  We should let 3 proceed as well,
	 * because we use that segment, so if code makes two subsequent
	 * calls it should succeed.
	 */
	if (idx != 6 && idx != -1 && idx != 3)
		return (EINVAL);

	/*
	 * We have to copy out the GDT entry we use.  FreeBSD uses GDT
	 * entry #3 for storing %gs, so load that.
	 *
	 * XXX: what if a user space program doesn't check this value and
	 * tries to use 6, 7 or 8?
	 */
	idx = info.entry_number = 3;
	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (error);

	if (LINUX_LDT_empty(&info)) {
		a[0] = 0;
		a[1] = 0;
	} else {
		a[0] = LINUX_LDT_entry_a(&info);
		a[1] = LINUX_LDT_entry_b(&info);
	}

	memcpy(&sd, &a, sizeof(a));
#ifdef DEBUG
	if (ldebug(set_thread_area))
		printf("Segment created in set_thread_area: "
		    "lobase: %x, hibase: %x, lolimit: %x, hilimit: %x, "
		    "type: %i, dpl: %i, p: %i, xx: %i, def32: %i, gran: %i\n",
		    sd.sd_lobase,
		    sd.sd_hibase,
		    sd.sd_lolimit,
		    sd.sd_hilimit,
		    sd.sd_type,
		    sd.sd_dpl,
		    sd.sd_p,
		    sd.sd_xx,
		    sd.sd_def32,
		    sd.sd_gran);
#endif

	/* this is taken from i386 version of cpu_set_user_tls() */
	critical_enter();
	/* set %gs */
	td->td_pcb->pcb_gsd = sd;
	PCPU_GET(fsgs_gdt)[1] = sd;
	load_gs(GSEL(GUGS_SEL, SEL_UPL));
	critical_exit();

	return (0);
}

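/*
 * Read back the %gs GDT slot we maintain and convert it into the Linux
 * user_desc layout.
 */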
int
linux_get_thread_area(struct thread *td, struct linux_get_thread_area_args *args)
{

	struct l_user_desc info;
	int error;
	int idx;
	struct l_desc_struct desc;
	struct segment_descriptor sd;

#ifdef DEBUG
	if (ldebug(get_thread_area))
		printf(ARGS(get_thread_area, "%p"), args->desc);
#endif

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (error);

	idx = info.entry_number;
	/* XXX: I am not sure if we want 3 to be allowed too. */
	if (idx != 6 && idx != 3)
		return (EINVAL);

	idx = 3;

	memset(&info, 0, sizeof(info));

	sd = PCPU_GET(fsgs_gdt)[1];

	memcpy(&desc, &sd, sizeof(desc));

	info.entry_number = idx;
	info.base_addr = LINUX_GET_BASE(&desc);
	info.limit = LINUX_GET_LIMIT(&desc);
	info.seg_32bit = LINUX_GET_32BIT(&desc);
	info.contents = LINUX_GET_CONTENTS(&desc);
	info.read_exec_only = !LINUX_GET_WRITABLE(&desc);
	info.limit_in_pages = LINUX_GET_LIMIT_PAGES(&desc);
	info.seg_not_present = !LINUX_GET_PRESENT(&desc);
	info.useable = LINUX_GET_USEABLE(&desc);

	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (EFAULT);

	return (0);
}

/* copied from kern/kern_time.c */
int
linux_timer_create(struct thread *td, struct linux_timer_create_args *args)
{
	return sys_ktimer_create(td, (struct ktimer_create_args *) args);
}

int
linux_timer_settime(struct thread *td, struct linux_timer_settime_args *args)
{
	return sys_ktimer_settime(td, (struct ktimer_settime_args *) args);
}

int
linux_timer_gettime(struct thread *td, struct linux_timer_gettime_args *args)
{
	return sys_ktimer_gettime(td, (struct ktimer_gettime_args *) args);
}

int
linux_timer_getoverrun(struct thread *td, struct linux_timer_getoverrun_args *args)
{
	return sys_ktimer_getoverrun(td, (struct ktimer_getoverrun_args *) args);
}

int
linux_timer_delete(struct thread *td, struct linux_timer_delete_args *args)
{
	return sys_ktimer_delete(td, (struct ktimer_delete_args *) args);
}

/* XXX: this won't work with a module - convert it */
int
linux_mq_open(struct thread *td, struct linux_mq_open_args *args)
{
#ifdef P1003_1B_MQUEUE
	return sys_kmq_open(td, (struct kmq_open_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_unlink(struct thread *td, struct linux_mq_unlink_args *args)
{
#ifdef P1003_1B_MQUEUE
	return sys_kmq_unlink(td, (struct kmq_unlink_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_timedsend(struct thread *td, struct linux_mq_timedsend_args *args)
{
#ifdef P1003_1B_MQUEUE
	return sys_kmq_timedsend(td, (struct kmq_timedsend_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_timedreceive(struct thread *td, struct linux_mq_timedreceive_args *args)
{
#ifdef P1003_1B_MQUEUE
	return sys_kmq_timedreceive(td, (struct kmq_timedreceive_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_notify(struct thread *td, struct linux_mq_notify_args *args)
{
#ifdef P1003_1B_MQUEUE
	return sys_kmq_notify(td, (struct kmq_notify_args *) args);
#else
	return (ENOSYS);
#endif
}

int
linux_mq_getsetattr(struct thread *td, struct linux_mq_getsetattr_args *args)
{
#ifdef P1003_1B_MQUEUE
	return sys_kmq_setattr(td, (struct kmq_setattr_args *) args);
#else
	return (ENOSYS);
#endif
}

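/*
 * wait4(2): translate the Linux wait options to FreeBSD's and copy out
 * the rusage if the caller asked for it.
 */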
int
linux_wait4(struct thread *td, struct linux_wait4_args *args)
{
	int error, options;
	struct rusage ru, *rup;

#ifdef DEBUG
	if (ldebug(wait4))
		printf(ARGS(wait4, "%d, %p, %d, %p"),
		    args->pid, (void *)args->status, args->options,
		    (void *)args->rusage);
#endif

	options = (args->options & (WNOHANG | WUNTRACED));
	/* WLINUXCLONE should be equal to __WCLONE, but we make sure */
	if (args->options & __WCLONE)
		options |= WLINUXCLONE;

	if (args->rusage != NULL)
		rup = &ru;
	else
		rup = NULL;
	error = linux_common_wait(td, args->pid, args->status, options, rup);
	if (error)
		return (error);
	if (args->rusage != NULL)
		error = copyout(&ru, args->rusage, sizeof(ru));

	return (error);
}