1 /*-
2 * Copyright (c) 1982, 1986, 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)sys_generic.c 8.5 (Berkeley) 1/21/94
35 */
36
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39
40 #include "opt_capsicum.h"
41 #include "opt_compat.h"
42 #include "opt_ktrace.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/sysproto.h>
47 #include <sys/capsicum.h>
48 #include <sys/filedesc.h>
49 #include <sys/filio.h>
50 #include <sys/fcntl.h>
51 #include <sys/file.h>
52 #include <sys/lock.h>
53 #include <sys/proc.h>
54 #include <sys/signalvar.h>
55 #include <sys/socketvar.h>
56 #include <sys/uio.h>
57 #include <sys/kernel.h>
58 #include <sys/ktr.h>
59 #include <sys/limits.h>
60 #include <sys/malloc.h>
61 #include <sys/poll.h>
62 #include <sys/resourcevar.h>
63 #include <sys/selinfo.h>
64 #include <sys/sleepqueue.h>
65 #include <sys/syscallsubr.h>
66 #include <sys/sysctl.h>
67 #include <sys/sysent.h>
68 #include <sys/vnode.h>
69 #include <sys/bio.h>
70 #include <sys/buf.h>
71 #include <sys/condvar.h>
72 #ifdef KTRACE
73 #include <sys/ktrace.h>
74 #endif
75
76 #include <security/audit/audit.h>
77
78 /*
79 * The following macro defines how many bytes will be allocated on
80 * the stack, instead of via malloc(9), when copying ioctl data
81 * structures between userspace and the kernel. Some ioctls with
82 * small data structures are issued very frequently, and this small
83 * on-stack buffer gives them a significant speedup. The value of
84 * this define should be at least 64 bytes and should be a power of
85 * two. The data is currently hard-aligned to an 8-byte boundary on
86 * the stack, which should be sufficient for all supported
87 * platforms.
88 */
89 #define SYS_IOCTL_SMALL_SIZE 128 /* bytes */
90 #define SYS_IOCTL_SMALL_ALIGN 8 /* bytes */
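/*
 * A minimal userspace sketch (not part of the kernel build): an ioctl
 * whose argument is a single int, such as FIONREAD, falls well under
 * SYS_IOCTL_SMALL_SIZE, so sys_ioctl() below stages it in the on-stack
 * `smalldata' buffer instead of calling malloc(9).  The descriptor
 * `fd' is assumed to be an open socket or tty.
 */
#if 0
#include <sys/types.h>
#include <sys/filio.h>
#include <sys/ioctl.h>
#include <err.h>

static int
bytes_pending(int fd)
{
        int n;

        /* FIONREAD copies a 4-byte int out through the small buffer. */
        if (ioctl(fd, FIONREAD, &n) == -1)
                err(1, "ioctl(FIONREAD)");
        return (n);
}
#endif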
91
92 #ifdef __LP64__
93 static int iosize_max_clamp = 0;
94 SYSCTL_INT(_debug, OID_AUTO, iosize_max_clamp, CTLFLAG_RW,
95 &iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX");
96 static int devfs_iosize_max_clamp = 1;
97 SYSCTL_INT(_debug, OID_AUTO, devfs_iosize_max_clamp, CTLFLAG_RW,
98 &devfs_iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX for devices");
99 #endif
100
101 /*
102 * Assert that the return value of read(2) and write(2) syscalls fits
103 * into a register. If not, an architecture will need to provide the
104 * usermode wrappers to reconstruct the result.
105 */
106 CTASSERT(sizeof(register_t) >= sizeof(size_t));
107
108 static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
109 static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
110 MALLOC_DEFINE(M_IOV, "iov", "large iov's");
111
112 static int pollout(struct thread *, struct pollfd *, struct pollfd *,
113 u_int);
114 static int pollscan(struct thread *, struct pollfd *, u_int);
115 static int pollrescan(struct thread *);
116 static int selscan(struct thread *, fd_mask **, fd_mask **, int);
117 static int selrescan(struct thread *, fd_mask **, fd_mask **);
118 static void selfdalloc(struct thread *, void *);
119 static void selfdfree(struct seltd *, struct selfd *);
120 static int dofileread(struct thread *, int, struct file *, struct uio *,
121 off_t, int);
122 static int dofilewrite(struct thread *, int, struct file *, struct uio *,
123 off_t, int);
124 static void doselwakeup(struct selinfo *, int);
125 static void seltdinit(struct thread *);
126 static int seltdwait(struct thread *, sbintime_t, sbintime_t);
127 static void seltdclear(struct thread *);
128
129 /*
130 * One seltd is allocated per thread, on demand.
131 *
132 * t - protected by st_mtx
133 * k - Only accessed by curthread or read-only
134 */
135 struct seltd {
136 STAILQ_HEAD(, selfd) st_selq; /* (k) List of selfds. */
137 struct selfd *st_free1; /* (k) free fd for read set. */
138 struct selfd *st_free2; /* (k) free fd for write set. */
139 struct mtx st_mtx; /* Protects struct seltd */
140 struct cv st_wait; /* (t) Wait channel. */
141 int st_flags; /* (t) SELTD_ flags. */
142 };
143
144 #define SELTD_PENDING 0x0001 /* We have pending events. */
145 #define SELTD_RESCAN 0x0002 /* Doing a rescan. */
146
147 /*
148 * One selfd allocated per-thread per-file-descriptor.
149 * f - protected by sf_mtx
150 */
151 struct selfd {
152 STAILQ_ENTRY(selfd) sf_link; /* (k) fds owned by this td. */
153 TAILQ_ENTRY(selfd) sf_threads; /* (f) fds on this selinfo. */
154 struct selinfo *sf_si; /* (f) selinfo when linked. */
155 struct mtx *sf_mtx; /* Pointer to selinfo mtx. */
156 struct seltd *sf_td; /* (k) owning seltd. */
157 void *sf_cookie; /* (k) fd or pollfd. */
158 u_int sf_refs;
159 };
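/*
 * Rough lifecycle of the two structures above: kern_select()/kern_poll()
 * call seltdinit(), which allocates the per-thread seltd on first use.
 * Each scan pass calls selfdalloc() to preallocate up to two selfds per
 * descriptor before invoking fo_poll(); a backend with no event pending
 * calls selrecord(), which links one of those selfds onto both the
 * thread's st_selq and the object's selinfo.  When the event fires,
 * selwakeup() -> doselwakeup() detaches the selfd from the selinfo,
 * marks the seltd SELTD_PENDING and wakes the thread, which rescans and
 * finally releases its selfds via selfdfree()/seltdclear().
 */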
160
161 static uma_zone_t selfd_zone;
162 static struct mtx_pool *mtxpool_select;
163
164 #ifdef __LP64__
165 size_t
166 devfs_iosize_max(void)
167 {
168
169 return (devfs_iosize_max_clamp || SV_CURPROC_FLAG(SV_ILP32) ?
170 INT_MAX : SSIZE_MAX);
171 }
172
173 size_t
174 iosize_max(void)
175 {
176
177 return (iosize_max_clamp || SV_CURPROC_FLAG(SV_ILP32) ?
178 INT_MAX : SSIZE_MAX);
179 }
180 #endif
181
182 #ifndef _SYS_SYSPROTO_H_
183 struct read_args {
184 int fd;
185 void *buf;
186 size_t nbyte;
187 };
188 #endif
189 int
190 sys_read(td, uap)
191 struct thread *td;
192 struct read_args *uap;
193 {
194 struct uio auio;
195 struct iovec aiov;
196 int error;
197
198 if (uap->nbyte > IOSIZE_MAX)
199 return (EINVAL);
200 aiov.iov_base = uap->buf;
201 aiov.iov_len = uap->nbyte;
202 auio.uio_iov = &aiov;
203 auio.uio_iovcnt = 1;
204 auio.uio_resid = uap->nbyte;
205 auio.uio_segflg = UIO_USERSPACE;
206 error = kern_readv(td, uap->fd, &auio);
207 return(error);
208 }
209
210 /*
211 * Positioned read system call
212 */
213 #ifndef _SYS_SYSPROTO_H_
214 struct pread_args {
215 int fd;
216 void *buf;
217 size_t nbyte;
218 int pad;
219 off_t offset;
220 };
221 #endif
222 int
223 sys_pread(struct thread *td, struct pread_args *uap)
224 {
225
226 return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
227 }
228
229 int
230 kern_pread(struct thread *td, int fd, void *buf, size_t nbyte, off_t offset)
231 {
232 struct uio auio;
233 struct iovec aiov;
234 int error;
235
236 if (nbyte > IOSIZE_MAX)
237 return (EINVAL);
238 aiov.iov_base = buf;
239 aiov.iov_len = nbyte;
240 auio.uio_iov = &aiov;
241 auio.uio_iovcnt = 1;
242 auio.uio_resid = nbyte;
243 auio.uio_segflg = UIO_USERSPACE;
244 error = kern_preadv(td, fd, &auio, offset);
245 return (error);
246 }
247
248 #if defined(COMPAT_FREEBSD6)
249 int
250 freebsd6_pread(struct thread *td, struct freebsd6_pread_args *uap)
251 {
252
253 return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
254 }
255 #endif
256
257 /*
258 * Scatter read system call.
259 */
260 #ifndef _SYS_SYSPROTO_H_
261 struct readv_args {
262 int fd;
263 struct iovec *iovp;
264 u_int iovcnt;
265 };
266 #endif
267 int
268 sys_readv(struct thread *td, struct readv_args *uap)
269 {
270 struct uio *auio;
271 int error;
272
273 error = copyinuio(uap->iovp, uap->iovcnt, &auio);
274 if (error)
275 return (error);
276 error = kern_readv(td, uap->fd, auio);
277 free(auio, M_IOV);
278 return (error);
279 }
280
281 int
282 kern_readv(struct thread *td, int fd, struct uio *auio)
283 {
284 struct file *fp;
285 cap_rights_t rights;
286 int error;
287
288 error = fget_read(td, fd, cap_rights_init(&rights, CAP_READ), &fp);
289 if (error)
290 return (error);
291 error = dofileread(td, fd, fp, auio, (off_t)-1, 0);
292 fdrop(fp, td);
293 return (error);
294 }
295
296 /*
297 * Scatter positioned read system call.
298 */
299 #ifndef _SYS_SYSPROTO_H_
300 struct preadv_args {
301 int fd;
302 struct iovec *iovp;
303 u_int iovcnt;
304 off_t offset;
305 };
306 #endif
307 int
308 sys_preadv(struct thread *td, struct preadv_args *uap)
309 {
310 struct uio *auio;
311 int error;
312
313 error = copyinuio(uap->iovp, uap->iovcnt, &auio);
314 if (error)
315 return (error);
316 error = kern_preadv(td, uap->fd, auio, uap->offset);
317 free(auio, M_IOV);
318 return (error);
319 }
320
321 int
322 kern_preadv(td, fd, auio, offset)
323 struct thread *td;
324 int fd;
325 struct uio *auio;
326 off_t offset;
327 {
328 struct file *fp;
329 cap_rights_t rights;
330 int error;
331
332 error = fget_read(td, fd, cap_rights_init(&rights, CAP_PREAD), &fp);
333 if (error)
334 return (error);
335 if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
336 error = ESPIPE;
337 else if (offset < 0 &&
338 (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
339 error = EINVAL;
340 else
341 error = dofileread(td, fd, fp, auio, offset, FOF_OFFSET);
342 fdrop(fp, td);
343 return (error);
344 }
345
346 /*
347 * Common code for readv and preadv that reads data in
348 * from a file using the passed in uio, offset, and flags.
349 */
350 static int
351 dofileread(td, fd, fp, auio, offset, flags)
352 struct thread *td;
353 int fd;
354 struct file *fp;
355 struct uio *auio;
356 off_t offset;
357 int flags;
358 {
359 ssize_t cnt;
360 int error;
361 #ifdef KTRACE
362 struct uio *ktruio = NULL;
363 #endif
364
365 AUDIT_ARG_FD(fd);
366
367 /* Finish zero length reads right here */
368 if (auio->uio_resid == 0) {
369 td->td_retval[0] = 0;
370 return(0);
371 }
372 auio->uio_rw = UIO_READ;
373 auio->uio_offset = offset;
374 auio->uio_td = td;
375 #ifdef KTRACE
376 if (KTRPOINT(td, KTR_GENIO))
377 ktruio = cloneuio(auio);
378 #endif
379 cnt = auio->uio_resid;
380 if ((error = fo_read(fp, auio, td->td_ucred, flags, td))) {
381 if (auio->uio_resid != cnt && (error == ERESTART ||
382 error == EINTR || error == EWOULDBLOCK))
383 error = 0;
384 }
385 cnt -= auio->uio_resid;
386 #ifdef KTRACE
387 if (ktruio != NULL) {
388 ktruio->uio_resid = cnt;
389 ktrgenio(fd, UIO_READ, ktruio, error);
390 }
391 #endif
392 td->td_retval[0] = cnt;
393 return (error);
394 }
395
396 #ifndef _SYS_SYSPROTO_H_
397 struct write_args {
398 int fd;
399 const void *buf;
400 size_t nbyte;
401 };
402 #endif
403 int
404 sys_write(td, uap)
405 struct thread *td;
406 struct write_args *uap;
407 {
408 struct uio auio;
409 struct iovec aiov;
410 int error;
411
412 if (uap->nbyte > IOSIZE_MAX)
413 return (EINVAL);
414 aiov.iov_base = (void *)(uintptr_t)uap->buf;
415 aiov.iov_len = uap->nbyte;
416 auio.uio_iov = &aiov;
417 auio.uio_iovcnt = 1;
418 auio.uio_resid = uap->nbyte;
419 auio.uio_segflg = UIO_USERSPACE;
420 error = kern_writev(td, uap->fd, &auio);
421 return(error);
422 }
423
424 /*
425 * Positioned write system call.
426 */
427 #ifndef _SYS_SYSPROTO_H_
428 struct pwrite_args {
429 int fd;
430 const void *buf;
431 size_t nbyte;
432 int pad;
433 off_t offset;
434 };
435 #endif
436 int
437 sys_pwrite(struct thread *td, struct pwrite_args *uap)
438 {
439
440 return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
441 }
442
443 int
444 kern_pwrite(struct thread *td, int fd, const void *buf, size_t nbyte,
445 off_t offset)
446 {
447 struct uio auio;
448 struct iovec aiov;
449 int error;
450
451 if (nbyte > IOSIZE_MAX)
452 return (EINVAL);
453 aiov.iov_base = (void *)(uintptr_t)buf;
454 aiov.iov_len = nbyte;
455 auio.uio_iov = &aiov;
456 auio.uio_iovcnt = 1;
457 auio.uio_resid = nbyte;
458 auio.uio_segflg = UIO_USERSPACE;
459 error = kern_pwritev(td, fd, &auio, offset);
460 return(error);
461 }
462
463 #if defined(COMPAT_FREEBSD6)
464 int
465 freebsd6_pwrite(struct thread *td, struct freebsd6_pwrite_args *uap)
466 {
467
468 return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
469 }
470 #endif
471
472 /*
473 * Gather write system call.
474 */
475 #ifndef _SYS_SYSPROTO_H_
476 struct writev_args {
477 int fd;
478 struct iovec *iovp;
479 u_int iovcnt;
480 };
481 #endif
482 int
483 sys_writev(struct thread *td, struct writev_args *uap)
484 {
485 struct uio *auio;
486 int error;
487
488 error = copyinuio(uap->iovp, uap->iovcnt, &auio);
489 if (error)
490 return (error);
491 error = kern_writev(td, uap->fd, auio);
492 free(auio, M_IOV);
493 return (error);
494 }
495
496 int
497 kern_writev(struct thread *td, int fd, struct uio *auio)
498 {
499 struct file *fp;
500 cap_rights_t rights;
501 int error;
502
503 error = fget_write(td, fd, cap_rights_init(&rights, CAP_WRITE), &fp);
504 if (error)
505 return (error);
506 error = dofilewrite(td, fd, fp, auio, (off_t)-1, 0);
507 fdrop(fp, td);
508 return (error);
509 }
510
511 /*
512 * Gather positioned write system call.
513 */
514 #ifndef _SYS_SYSPROTO_H_
515 struct pwritev_args {
516 int fd;
517 struct iovec *iovp;
518 u_int iovcnt;
519 off_t offset;
520 };
521 #endif
522 int
523 sys_pwritev(struct thread *td, struct pwritev_args *uap)
524 {
525 struct uio *auio;
526 int error;
527
528 error = copyinuio(uap->iovp, uap->iovcnt, &auio);
529 if (error)
530 return (error);
531 error = kern_pwritev(td, uap->fd, auio, uap->offset);
532 free(auio, M_IOV);
533 return (error);
534 }
535
536 int
537 kern_pwritev(td, fd, auio, offset)
538 struct thread *td;
539 struct uio *auio;
540 int fd;
541 off_t offset;
542 {
543 struct file *fp;
544 cap_rights_t rights;
545 int error;
546
547 error = fget_write(td, fd, cap_rights_init(&rights, CAP_PWRITE), &fp);
548 if (error)
549 return (error);
550 if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
551 error = ESPIPE;
552 else if (offset < 0 &&
553 (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
554 error = EINVAL;
555 else
556 error = dofilewrite(td, fd, fp, auio, offset, FOF_OFFSET);
557 fdrop(fp, td);
558 return (error);
559 }
560
561 /*
562 * Common code for writev and pwritev that writes data to
563 * a file using the passed in uio, offset, and flags.
564 */
565 static int
566 dofilewrite(td, fd, fp, auio, offset, flags)
567 struct thread *td;
568 int fd;
569 struct file *fp;
570 struct uio *auio;
571 off_t offset;
572 int flags;
573 {
574 ssize_t cnt;
575 int error;
576 #ifdef KTRACE
577 struct uio *ktruio = NULL;
578 #endif
579
580 AUDIT_ARG_FD(fd);
581 auio->uio_rw = UIO_WRITE;
582 auio->uio_td = td;
583 auio->uio_offset = offset;
584 #ifdef KTRACE
585 if (KTRPOINT(td, KTR_GENIO))
586 ktruio = cloneuio(auio);
587 #endif
588 cnt = auio->uio_resid;
589 if (fp->f_type == DTYPE_VNODE &&
590 (fp->f_vnread_flags & FDEVFS_VNODE) == 0)
591 bwillwrite();
592 if ((error = fo_write(fp, auio, td->td_ucred, flags, td))) {
593 if (auio->uio_resid != cnt && (error == ERESTART ||
594 error == EINTR || error == EWOULDBLOCK))
595 error = 0;
596 /* Socket layer is responsible for issuing SIGPIPE. */
597 if (fp->f_type != DTYPE_SOCKET && error == EPIPE) {
598 PROC_LOCK(td->td_proc);
599 tdsignal(td, SIGPIPE);
600 PROC_UNLOCK(td->td_proc);
601 }
602 }
603 cnt -= auio->uio_resid;
604 #ifdef KTRACE
605 if (ktruio != NULL) {
606 ktruio->uio_resid = cnt;
607 ktrgenio(fd, UIO_WRITE, ktruio, error);
608 }
609 #endif
610 td->td_retval[0] = cnt;
611 return (error);
612 }
613
614 /*
615 * Truncate a file given a file descriptor.
616 *
617 * Can't use fget_write() here, since we must return EINVAL and not EBADF if
618 * the descriptor isn't writable.
619 */
620 int
621 kern_ftruncate(td, fd, length)
622 struct thread *td;
623 int fd;
624 off_t length;
625 {
626 struct file *fp;
627 cap_rights_t rights;
628 int error;
629
630 AUDIT_ARG_FD(fd);
631 if (length < 0)
632 return (EINVAL);
633 error = fget(td, fd, cap_rights_init(&rights, CAP_FTRUNCATE), &fp);
634 if (error)
635 return (error);
636 AUDIT_ARG_FILE(td->td_proc, fp);
637 if (!(fp->f_flag & FWRITE)) {
638 fdrop(fp, td);
639 return (EINVAL);
640 }
641 error = fo_truncate(fp, length, td->td_ucred, td);
642 fdrop(fp, td);
643 return (error);
644 }
645
646 #ifndef _SYS_SYSPROTO_H_
647 struct ftruncate_args {
648 int fd;
649 int pad;
650 off_t length;
651 };
652 #endif
653 int
654 sys_ftruncate(td, uap)
655 struct thread *td;
656 struct ftruncate_args *uap;
657 {
658
659 return (kern_ftruncate(td, uap->fd, uap->length));
660 }
661
662 #if defined(COMPAT_43)
663 #ifndef _SYS_SYSPROTO_H_
664 struct oftruncate_args {
665 int fd;
666 long length;
667 };
668 #endif
669 int
670 oftruncate(td, uap)
671 struct thread *td;
672 struct oftruncate_args *uap;
673 {
674
675 return (kern_ftruncate(td, uap->fd, uap->length));
676 }
677 #endif /* COMPAT_43 */
678
679 #ifndef _SYS_SYSPROTO_H_
680 struct ioctl_args {
681 int fd;
682 u_long com;
683 caddr_t data;
684 };
685 #endif
686 /* ARGSUSED */
687 int
688 sys_ioctl(struct thread *td, struct ioctl_args *uap)
689 {
690 u_char smalldata[SYS_IOCTL_SMALL_SIZE] __aligned(SYS_IOCTL_SMALL_ALIGN);
691 uint32_t com;
692 int arg, error;
693 u_int size;
694 caddr_t data;
695
696 #ifdef INVARIANTS
697 if (uap->com > 0xffffffff) {
698 printf(
699 "WARNING pid %d (%s): ioctl sign-extension ioctl %lx\n",
700 td->td_proc->p_pid, td->td_name, uap->com);
701 }
702 #endif
703 com = (uint32_t)uap->com;
704
705 /*
706 * Interpret high order word to find amount of data to be
707 * copied to/from the user's address space.
708 */
709 size = IOCPARM_LEN(com);
710 if ((size > IOCPARM_MAX) ||
711 ((com & (IOC_VOID | IOC_IN | IOC_OUT)) == 0) ||
712 #if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
713 ((com & IOC_OUT) && size == 0) ||
714 #else
715 ((com & (IOC_IN | IOC_OUT)) && size == 0) ||
716 #endif
717 ((com & IOC_VOID) && size > 0 && size != sizeof(int)))
718 return (ENOTTY);
719
720 if (size > 0) {
721 if (com & IOC_VOID) {
722 /* Integer argument. */
723 arg = (intptr_t)uap->data;
724 data = (void *)&arg;
725 size = 0;
726 } else {
727 if (size > SYS_IOCTL_SMALL_SIZE)
728 data = malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
729 else
730 data = smalldata;
731 }
732 } else
733 data = (void *)&uap->data;
734 if (com & IOC_IN) {
735 error = copyin(uap->data, data, (u_int)size);
736 if (error != 0)
737 goto out;
738 } else if (com & IOC_OUT) {
739 /*
740 * Zero the buffer so the user always
741 * gets back something deterministic.
742 */
743 bzero(data, size);
744 }
745
746 error = kern_ioctl(td, uap->fd, com, data);
747
748 if (error == 0 && (com & IOC_OUT))
749 error = copyout(data, uap->data, (u_int)size);
750
751 out:
752 if (size > SYS_IOCTL_SMALL_SIZE)
753 free(data, M_IOCTLOPS);
754 return (error);
755 }
756
757 int
758 kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
759 {
760 struct file *fp;
761 struct filedesc *fdp;
762 #ifndef CAPABILITIES
763 cap_rights_t rights;
764 #endif
765 int error, tmp, locked;
766
767 AUDIT_ARG_FD(fd);
768 AUDIT_ARG_CMD(com);
769
770 fdp = td->td_proc->p_fd;
771
772 switch (com) {
773 case FIONCLEX:
774 case FIOCLEX:
775 FILEDESC_XLOCK(fdp);
776 locked = LA_XLOCKED;
777 break;
778 default:
779 #ifdef CAPABILITIES
780 FILEDESC_SLOCK(fdp);
781 locked = LA_SLOCKED;
782 #else
783 locked = LA_UNLOCKED;
784 #endif
785 break;
786 }
787
788 #ifdef CAPABILITIES
789 if ((fp = fget_locked(fdp, fd)) == NULL) {
790 error = EBADF;
791 goto out;
792 }
793 if ((error = cap_ioctl_check(fdp, fd, com)) != 0) {
794 fp = NULL; /* fhold() was not called yet */
795 goto out;
796 }
797 fhold(fp);
798 if (locked == LA_SLOCKED) {
799 FILEDESC_SUNLOCK(fdp);
800 locked = LA_UNLOCKED;
801 }
802 #else
803 error = fget(td, fd, cap_rights_init(&rights, CAP_IOCTL), &fp);
804 if (error != 0) {
805 fp = NULL;
806 goto out;
807 }
808 #endif
809 if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
810 error = EBADF;
811 goto out;
812 }
813
814 switch (com) {
815 case FIONCLEX:
816 fdp->fd_ofiles[fd].fde_flags &= ~UF_EXCLOSE;
817 goto out;
818 case FIOCLEX:
819 fdp->fd_ofiles[fd].fde_flags |= UF_EXCLOSE;
820 goto out;
821 case FIONBIO:
822 if ((tmp = *(int *)data))
823 atomic_set_int(&fp->f_flag, FNONBLOCK);
824 else
825 atomic_clear_int(&fp->f_flag, FNONBLOCK);
826 data = (void *)&tmp;
827 break;
828 case FIOASYNC:
829 if ((tmp = *(int *)data))
830 atomic_set_int(&fp->f_flag, FASYNC);
831 else
832 atomic_clear_int(&fp->f_flag, FASYNC);
833 data = (void *)&tmp;
834 break;
835 }
836
837 error = fo_ioctl(fp, com, data, td->td_ucred, td);
838 out:
839 switch (locked) {
840 case LA_XLOCKED:
841 FILEDESC_XUNLOCK(fdp);
842 break;
843 #ifdef CAPABILITIES
844 case LA_SLOCKED:
845 FILEDESC_SUNLOCK(fdp);
846 break;
847 #endif
848 default:
849 FILEDESC_UNLOCK_ASSERT(fdp);
850 break;
851 }
852 if (fp != NULL)
853 fdrop(fp, td);
854 return (error);
855 }
856
857 int
858 poll_no_poll(int events)
859 {
860 /*
861 * Return true for read/write. If the user asked for something
862 * special, return POLLNVAL, so that clients have a way of
863 * determining reliably whether or not the extended
864 * functionality is present without hard-coding knowledge
865 * of specific filesystem implementations.
866 */
867 if (events & ~POLLSTANDARD)
868 return (POLLNVAL);
869
870 return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
871 }
872
873 int
874 sys_pselect(struct thread *td, struct pselect_args *uap)
875 {
876 struct timespec ts;
877 struct timeval tv, *tvp;
878 sigset_t set, *uset;
879 int error;
880
881 if (uap->ts != NULL) {
882 error = copyin(uap->ts, &ts, sizeof(ts));
883 if (error != 0)
884 return (error);
885 TIMESPEC_TO_TIMEVAL(&tv, &ts);
886 tvp = &tv;
887 } else
888 tvp = NULL;
889 if (uap->sm != NULL) {
890 error = copyin(uap->sm, &set, sizeof(set));
891 if (error != 0)
892 return (error);
893 uset = &set;
894 } else
895 uset = NULL;
896 return (kern_pselect(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
897 uset, NFDBITS));
898 }
899
900 int
901 kern_pselect(struct thread *td, int nd, fd_set *in, fd_set *ou, fd_set *ex,
902 struct timeval *tvp, sigset_t *uset, int abi_nfdbits)
903 {
904 int error;
905
906 if (uset != NULL) {
907 error = kern_sigprocmask(td, SIG_SETMASK, uset,
908 &td->td_oldsigmask, 0);
909 if (error != 0)
910 return (error);
911 td->td_pflags |= TDP_OLDMASK;
912 /*
913 * Make sure that ast() is called on return to
914 * usermode and TDP_OLDMASK is cleared, restoring old
915 * sigmask.
916 */
917 thread_lock(td);
918 td->td_flags |= TDF_ASTPENDING;
919 thread_unlock(td);
920 }
921 error = kern_select(td, nd, in, ou, ex, tvp, abi_nfdbits);
922 return (error);
923 }
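/*
 * A minimal userspace sketch (not part of the kernel build) of why the
 * sigmask is installed inside the syscall above: swapping the mask and
 * sleeping as one operation closes the window in which a signal delivered
 * between a separate sigprocmask() and select() would be missed.  The
 * descriptor `fd' and the caller-prepared mask `waitmask' are assumptions.
 */
#if 0
#include <sys/select.h>
#include <signal.h>
#include <stddef.h>

static int
wait_readable(int fd, const sigset_t *waitmask)
{
        fd_set rset;

        FD_ZERO(&rset);
        FD_SET(fd, &rset);
        /* The old mask is restored on return, as kern_pselect() arranges. */
        return (pselect(fd + 1, &rset, NULL, NULL, NULL, waitmask));
}
#endif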
924
925 #ifndef _SYS_SYSPROTO_H_
926 struct select_args {
927 int nd;
928 fd_set *in, *ou, *ex;
929 struct timeval *tv;
930 };
931 #endif
932 int
933 sys_select(struct thread *td, struct select_args *uap)
934 {
935 struct timeval tv, *tvp;
936 int error;
937
938 if (uap->tv != NULL) {
939 error = copyin(uap->tv, &tv, sizeof(tv));
940 if (error)
941 return (error);
942 tvp = &tv;
943 } else
944 tvp = NULL;
945
946 return (kern_select(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
947 NFDBITS));
948 }
949
950 /*
951 * In the unlikely case that the user specified an n greater than the
952 * last open file descriptor, check that no bits are set after the
953 * last valid fd. We must return EBADF if any is set.
954 *
955 * Some applications rely on this behaviour.
956 *
957 * nd is fd_lastfile + 1.
958 */
959 static int
960 select_check_badfd(fd_set *fd_in, int nd, int ndu, int abi_nfdbits)
961 {
962 char *addr, *oaddr;
963 int b, i, res;
964 uint8_t bits;
965
966 if (nd >= ndu || fd_in == NULL)
967 return (0);
968
969 oaddr = NULL;
970 bits = 0; /* silence gcc */
971 for (i = nd; i < ndu; i++) {
972 b = i / NBBY;
973 #if BYTE_ORDER == LITTLE_ENDIAN
974 addr = (char *)fd_in + b;
975 #else
976 addr = (char *)fd_in;
977 if (abi_nfdbits == NFDBITS) {
978 addr += rounddown(b, sizeof(fd_mask)) +
979 sizeof(fd_mask) - 1 - b % sizeof(fd_mask);
980 } else {
981 addr += rounddown(b, sizeof(uint32_t)) +
982 sizeof(uint32_t) - 1 - b % sizeof(uint32_t);
983 }
984 #endif
985 if (addr != oaddr) {
986 res = fubyte(addr);
987 if (res == -1)
988 return (EFAULT);
989 oaddr = addr;
990 bits = res;
991 }
992 if ((bits & (1 << (i % NBBY))) != 0)
993 return (EBADF);
994 }
995 return (0);
996 }
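/*
 * A minimal userspace sketch (not part of the kernel build) of the
 * behaviour checked above: naming a descriptor that was never opened
 * makes select() fail with EBADF rather than silently ignoring the bit.
 * The value 200 is an arbitrary fd assumed to be closed.
 */
#if 0
#include <sys/select.h>
#include <err.h>

static void
demo_badf(void)
{
        fd_set rset;
        struct timeval tv = { 0, 0 };

        FD_ZERO(&rset);
        FD_SET(200, &rset);             /* fd 200 was never opened */
        if (select(201, &rset, NULL, NULL, &tv) == -1)
                warn("select");         /* expected: "Bad file descriptor" */
}
#endif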
997
998 int
999 kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou,
1000 fd_set *fd_ex, struct timeval *tvp, int abi_nfdbits)
1001 {
1002 struct filedesc *fdp;
1003 /*
1004 * The magic 2048 here is chosen to be just enough for FD_SETSIZE
1005 * infds with the new FD_SETSIZE of 1024, and more than enough for
1006 * FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE
1007 * of 256.
1008 */
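/*
 * Worked out: s_selbits holds 2048 bits == 256 bytes regardless of the
 * width of fd_mask.  One fd_set of 1024 descriptors needs 1024 / NBBY ==
 * 128 bytes and is kept twice (input and output copies) == 256 bytes;
 * three sets of 256 descriptors need 3 * 2 * 32 == 192 bytes.
 */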
1009 fd_mask s_selbits[howmany(2048, NFDBITS)];
1010 fd_mask *ibits[3], *obits[3], *selbits, *sbp;
1011 struct timeval rtv;
1012 sbintime_t asbt, precision, rsbt;
1013 u_int nbufbytes, ncpbytes, ncpubytes, nfdbits;
1014 int error, lf, ndu;
1015
1016 if (nd < 0)
1017 return (EINVAL);
1018 fdp = td->td_proc->p_fd;
1019 ndu = nd;
1020 lf = fdp->fd_lastfile;
1021 if (nd > lf + 1)
1022 nd = lf + 1;
1023
1024 error = select_check_badfd(fd_in, nd, ndu, abi_nfdbits);
1025 if (error != 0)
1026 return (error);
1027 error = select_check_badfd(fd_ou, nd, ndu, abi_nfdbits);
1028 if (error != 0)
1029 return (error);
1030 error = select_check_badfd(fd_ex, nd, ndu, abi_nfdbits);
1031 if (error != 0)
1032 return (error);
1033
1034 /*
1035 * Allocate just enough bits for the non-null fd_sets. Use the
1036 * preallocated auto buffer if possible.
1037 */
1038 nfdbits = roundup(nd, NFDBITS);
1039 ncpbytes = nfdbits / NBBY;
1040 ncpubytes = roundup(nd, abi_nfdbits) / NBBY;
1041 nbufbytes = 0;
1042 if (fd_in != NULL)
1043 nbufbytes += 2 * ncpbytes;
1044 if (fd_ou != NULL)
1045 nbufbytes += 2 * ncpbytes;
1046 if (fd_ex != NULL)
1047 nbufbytes += 2 * ncpbytes;
1048 if (nbufbytes <= sizeof s_selbits)
1049 selbits = &s_selbits[0];
1050 else
1051 selbits = malloc(nbufbytes, M_SELECT, M_WAITOK);
1052
1053 /*
1054 * Assign pointers into the bit buffers and fetch the input bits.
1055 * Put the output buffers together so that they can be bzeroed
1056 * together.
1057 */
1058 sbp = selbits;
1059 #define getbits(name, x) \
1060 do { \
1061 if (name == NULL) { \
1062 ibits[x] = NULL; \
1063 obits[x] = NULL; \
1064 } else { \
1065 ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp; \
1066 obits[x] = sbp; \
1067 sbp += ncpbytes / sizeof *sbp; \
1068 error = copyin(name, ibits[x], ncpubytes); \
1069 if (error != 0) \
1070 goto done; \
1071 bzero((char *)ibits[x] + ncpubytes, \
1072 ncpbytes - ncpubytes); \
1073 } \
1074 } while (0)
1075 getbits(fd_in, 0);
1076 getbits(fd_ou, 1);
1077 getbits(fd_ex, 2);
1078 #undef getbits
1079
1080 #if BYTE_ORDER == BIG_ENDIAN && defined(__LP64__)
1081 /*
1082 * XXX: swizzle_fdset assumes that if abi_nfdbits != NFDBITS,
1083 * we are running under 32-bit emulation. This should be more
1084 * generic.
1085 */
1086 #define swizzle_fdset(bits) \
1087 if (abi_nfdbits != NFDBITS && bits != NULL) { \
1088 int i; \
1089 for (i = 0; i < ncpbytes / sizeof *sbp; i++) \
1090 bits[i] = (bits[i] >> 32) | (bits[i] << 32); \
1091 }
1092 #else
1093 #define swizzle_fdset(bits)
1094 #endif
1095
1096 /* Make sure the bit order makes it through an ABI transition */
1097 swizzle_fdset(ibits[0]);
1098 swizzle_fdset(ibits[1]);
1099 swizzle_fdset(ibits[2]);
1100
1101 if (nbufbytes != 0)
1102 bzero(selbits, nbufbytes / 2);
1103
1104 precision = 0;
1105 if (tvp != NULL) {
1106 rtv = *tvp;
1107 if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
1108 rtv.tv_usec >= 1000000) {
1109 error = EINVAL;
1110 goto done;
1111 }
1112 if (!timevalisset(&rtv))
1113 asbt = 0;
1114 else if (rtv.tv_sec <= INT32_MAX) {
1115 rsbt = tvtosbt(rtv);
1116 precision = rsbt;
1117 precision >>= tc_precexp;
1118 if (TIMESEL(&asbt, rsbt))
1119 asbt += tc_tick_sbt;
1120 if (asbt <= SBT_MAX - rsbt)
1121 asbt += rsbt;
1122 else
1123 asbt = -1;
1124 } else
1125 asbt = -1;
1126 } else
1127 asbt = -1;
1128 seltdinit(td);
1129 /* Iterate until the timeout expires or descriptors become ready. */
1130 for (;;) {
1131 error = selscan(td, ibits, obits, nd);
1132 if (error || td->td_retval[0] != 0)
1133 break;
1134 error = seltdwait(td, asbt, precision);
1135 if (error)
1136 break;
1137 error = selrescan(td, ibits, obits);
1138 if (error || td->td_retval[0] != 0)
1139 break;
1140 }
1141 seltdclear(td);
1142
1143 done:
1144 /* select is not restarted after signals... */
1145 if (error == ERESTART)
1146 error = EINTR;
1147 if (error == EWOULDBLOCK)
1148 error = 0;
1149
1150 /* swizzle bit order back, if necessary */
1151 swizzle_fdset(obits[0]);
1152 swizzle_fdset(obits[1]);
1153 swizzle_fdset(obits[2]);
1154 #undef swizzle_fdset
1155
1156 #define putbits(name, x) \
1157 if (name && (error2 = copyout(obits[x], name, ncpubytes))) \
1158 error = error2;
1159 if (error == 0) {
1160 int error2;
1161
1162 putbits(fd_in, 0);
1163 putbits(fd_ou, 1);
1164 putbits(fd_ex, 2);
1165 #undef putbits
1166 }
1167 if (selbits != &s_selbits[0])
1168 free(selbits, M_SELECT);
1169
1170 return (error);
1171 }
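/*
 * A minimal userspace sketch (not part of the kernel build) matching the
 * "not restarted after signals" note above: because ERESTART is turned
 * into EINTR, a caller that wants to keep waiting across signals must
 * loop on EINTR itself.  The descriptor `fd' is an assumption.
 */
#if 0
#include <sys/select.h>
#include <err.h>
#include <errno.h>

static void
wait_until_readable(int fd)
{
        fd_set rset;
        int n;

        do {
                FD_ZERO(&rset);
                FD_SET(fd, &rset);
                n = select(fd + 1, &rset, NULL, NULL, NULL);
        } while (n == -1 && errno == EINTR);
        if (n == -1)
                err(1, "select");
}
#endif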
1172 /*
1173 * Convert a select bit set to poll flags.
1174 *
1175 * The backend always returns POLLHUP/POLLERR if appropriate and we
1176 * return this as a set bit in any set.
1177 */
1178 static int select_flags[3] = {
1179 POLLRDNORM | POLLHUP | POLLERR,
1180 POLLWRNORM | POLLHUP | POLLERR,
1181 POLLRDBAND | POLLERR
1182 };
1183
1184 /*
1185 * Compute the fo_poll flags required for a fd given by the index and
1186 * bit position in the fd_mask array.
1187 */
1188 static __inline int
1189 selflags(fd_mask **ibits, int idx, fd_mask bit)
1190 {
1191 int flags;
1192 int msk;
1193
1194 flags = 0;
1195 for (msk = 0; msk < 3; msk++) {
1196 if (ibits[msk] == NULL)
1197 continue;
1198 if ((ibits[msk][idx] & bit) == 0)
1199 continue;
1200 flags |= select_flags[msk];
1201 }
1202 return (flags);
1203 }
1204
1205 /*
1206 * Set the appropriate output bits given a mask of fired events and the
1207 * input bits originally requested.
1208 */
1209 static __inline int
1210 selsetbits(fd_mask **ibits, fd_mask **obits, int idx, fd_mask bit, int events)
1211 {
1212 int msk;
1213 int n;
1214
1215 n = 0;
1216 for (msk = 0; msk < 3; msk++) {
1217 if ((events & select_flags[msk]) == 0)
1218 continue;
1219 if (ibits[msk] == NULL)
1220 continue;
1221 if ((ibits[msk][idx] & bit) == 0)
1222 continue;
1223 /*
1224 * XXX Check for a duplicate set. This can occur because a
1225 * socket calls selrecord() twice for each poll() call
1226 * resulting in two selfds per real fd. selrescan() will
1227 * call selsetbits twice as a result.
1228 */
1229 if ((obits[msk][idx] & bit) != 0)
1230 continue;
1231 obits[msk][idx] |= bit;
1232 n++;
1233 }
1234
1235 return (n);
1236 }
1237
1238 static __inline int
1239 getselfd_cap(struct filedesc *fdp, int fd, struct file **fpp)
1240 {
1241 cap_rights_t rights;
1242
1243 cap_rights_init(&rights, CAP_EVENT);
1244
1245 return (fget_unlocked(fdp, fd, &rights, fpp, NULL));
1246 }
1247
1248 /*
1249 * Traverse the list of fds attached to this thread's seltd and check for
1250 * completion.
1251 */
1252 static int
1253 selrescan(struct thread *td, fd_mask **ibits, fd_mask **obits)
1254 {
1255 struct filedesc *fdp;
1256 struct selinfo *si;
1257 struct seltd *stp;
1258 struct selfd *sfp;
1259 struct selfd *sfn;
1260 struct file *fp;
1261 fd_mask bit;
1262 int fd, ev, n, idx;
1263 int error;
1264
1265 fdp = td->td_proc->p_fd;
1266 stp = td->td_sel;
1267 n = 0;
1268 STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
1269 fd = (int)(uintptr_t)sfp->sf_cookie;
1270 si = sfp->sf_si;
1271 selfdfree(stp, sfp);
1272 /* If the selinfo wasn't cleared the event didn't fire. */
1273 if (si != NULL)
1274 continue;
1275 error = getselfd_cap(fdp, fd, &fp);
1276 if (error)
1277 return (error);
1278 idx = fd / NFDBITS;
1279 bit = (fd_mask)1 << (fd % NFDBITS);
1280 ev = fo_poll(fp, selflags(ibits, idx, bit), td->td_ucred, td);
1281 fdrop(fp, td);
1282 if (ev != 0)
1283 n += selsetbits(ibits, obits, idx, bit, ev);
1284 }
1285 stp->st_flags = 0;
1286 td->td_retval[0] = n;
1287 return (0);
1288 }
1289
1290 /*
1291 * Perform the initial filedescriptor scan and register ourselves with
1292 * each selinfo.
1293 */
1294 static int
1295 selscan(td, ibits, obits, nfd)
1296 struct thread *td;
1297 fd_mask **ibits, **obits;
1298 int nfd;
1299 {
1300 struct filedesc *fdp;
1301 struct file *fp;
1302 fd_mask bit;
1303 int ev, flags, end, fd;
1304 int n, idx;
1305 int error;
1306
1307 fdp = td->td_proc->p_fd;
1308 n = 0;
1309 for (idx = 0, fd = 0; fd < nfd; idx++) {
1310 end = imin(fd + NFDBITS, nfd);
1311 for (bit = 1; fd < end; bit <<= 1, fd++) {
1312 /* Compute the list of events we're interested in. */
1313 flags = selflags(ibits, idx, bit);
1314 if (flags == 0)
1315 continue;
1316 error = getselfd_cap(fdp, fd, &fp);
1317 if (error)
1318 return (error);
1319 selfdalloc(td, (void *)(uintptr_t)fd);
1320 ev = fo_poll(fp, flags, td->td_ucred, td);
1321 fdrop(fp, td);
1322 if (ev != 0)
1323 n += selsetbits(ibits, obits, idx, bit, ev);
1324 }
1325 }
1326
1327 td->td_retval[0] = n;
1328 return (0);
1329 }
1330
1331 int
1332 sys_poll(struct thread *td, struct poll_args *uap)
1333 {
1334 struct timespec ts, *tsp;
1335
1336 if (uap->timeout != INFTIM) {
1337 if (uap->timeout < 0)
1338 return (EINVAL);
1339 ts.tv_sec = uap->timeout / 1000;
1340 ts.tv_nsec = (uap->timeout % 1000) * 1000000;
1341 tsp = &ts;
1342 } else
1343 tsp = NULL;
1344
1345 return (kern_poll(td, uap->fds, uap->nfds, tsp, NULL));
1346 }
1347
1348 int
1349 kern_poll(struct thread *td, struct pollfd *fds, u_int nfds,
1350 struct timespec *tsp, sigset_t *uset)
1351 {
1352 struct pollfd *bits;
1353 struct pollfd smallbits[32];
1354 sbintime_t sbt, precision, tmp;
1355 time_t over;
1356 struct timespec ts;
1357 int error;
1358 size_t ni;
1359
1360 precision = 0;
1361 if (tsp != NULL) {
1362 if (tsp->tv_sec < 0)
1363 return (EINVAL);
1364 if (tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000)
1365 return (EINVAL);
1366 if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
1367 sbt = 0;
1368 else {
1369 ts = *tsp;
1370 if (ts.tv_sec > INT32_MAX / 2) {
1371 over = ts.tv_sec - INT32_MAX / 2;
1372 ts.tv_sec -= over;
1373 } else
1374 over = 0;
1375 tmp = tstosbt(ts);
1376 precision = tmp;
1377 precision >>= tc_precexp;
1378 if (TIMESEL(&sbt, tmp))
1379 sbt += tc_tick_sbt;
1380 sbt += tmp;
1381 }
1382 } else
1383 sbt = -1;
1384
1385 if (nfds > maxfilesperproc && nfds > FD_SETSIZE)
1386 return (EINVAL);
1387 ni = nfds * sizeof(struct pollfd);
1388 if (ni > sizeof(smallbits))
1389 bits = malloc(ni, M_TEMP, M_WAITOK);
1390 else
1391 bits = smallbits;
1392 error = copyin(fds, bits, ni);
1393 if (error)
1394 goto done;
1395
1396 if (uset != NULL) {
1397 error = kern_sigprocmask(td, SIG_SETMASK, uset,
1398 &td->td_oldsigmask, 0);
1399 if (error)
1400 goto done;
1401 td->td_pflags |= TDP_OLDMASK;
1402 /*
1403 * Make sure that ast() is called on return to
1404 * usermode and TDP_OLDMASK is cleared, restoring old
1405 * sigmask.
1406 */
1407 thread_lock(td);
1408 td->td_flags |= TDF_ASTPENDING;
1409 thread_unlock(td);
1410 }
1411
1412 seltdinit(td);
1413 /* Iterate until the timeout expires or descriptors become ready. */
1414 for (;;) {
1415 error = pollscan(td, bits, nfds);
1416 if (error || td->td_retval[0] != 0)
1417 break;
1418 error = seltdwait(td, sbt, precision);
1419 if (error)
1420 break;
1421 error = pollrescan(td);
1422 if (error || td->td_retval[0] != 0)
1423 break;
1424 }
1425 seltdclear(td);
1426
1427 done:
1428 /* poll is not restarted after signals... */
1429 if (error == ERESTART)
1430 error = EINTR;
1431 if (error == EWOULDBLOCK)
1432 error = 0;
1433 if (error == 0) {
1434 error = pollout(td, bits, fds, nfds);
1435 if (error)
1436 goto out;
1437 }
1438 out:
1439 if (ni > sizeof(smallbits))
1440 free(bits, M_TEMP);
1441 return (error);
1442 }
1443
1444 int
1445 sys_ppoll(struct thread *td, struct ppoll_args *uap)
1446 {
1447 struct timespec ts, *tsp;
1448 sigset_t set, *ssp;
1449 int error;
1450
1451 if (uap->ts != NULL) {
1452 error = copyin(uap->ts, &ts, sizeof(ts));
1453 if (error)
1454 return (error);
1455 tsp = &ts;
1456 } else
1457 tsp = NULL;
1458 if (uap->set != NULL) {
1459 error = copyin(uap->set, &set, sizeof(set));
1460 if (error)
1461 return (error);
1462 ssp = &set;
1463 } else
1464 ssp = NULL;
1465 /*
1466 * fds still points to user space; kern_poll() will take care
1467 * of copying that array into kernel space.
1468 */
1469
1470 return (kern_poll(td, uap->fds, uap->nfds, tsp, ssp));
1471 }
1472
1473 static int
1474 pollrescan(struct thread *td)
1475 {
1476 struct seltd *stp;
1477 struct selfd *sfp;
1478 struct selfd *sfn;
1479 struct selinfo *si;
1480 struct filedesc *fdp;
1481 struct file *fp;
1482 struct pollfd *fd;
1483 #ifdef CAPABILITIES
1484 cap_rights_t rights;
1485 #endif
1486 int n;
1487
1488 n = 0;
1489 fdp = td->td_proc->p_fd;
1490 stp = td->td_sel;
1491 FILEDESC_SLOCK(fdp);
1492 STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
1493 fd = (struct pollfd *)sfp->sf_cookie;
1494 si = sfp->sf_si;
1495 selfdfree(stp, sfp);
1496 /* If the selinfo wasn't cleared the event didn't fire. */
1497 if (si != NULL)
1498 continue;
1499 fp = fdp->fd_ofiles[fd->fd].fde_file;
1500 #ifdef CAPABILITIES
1501 if (fp == NULL ||
1502 cap_check(cap_rights(fdp, fd->fd),
1503 cap_rights_init(&rights, CAP_EVENT)) != 0)
1504 #else
1505 if (fp == NULL)
1506 #endif
1507 {
1508 fd->revents = POLLNVAL;
1509 n++;
1510 continue;
1511 }
1512
1513 /*
1514 * Note: backend also returns POLLHUP and
1515 * POLLERR if appropriate.
1516 */
1517 fd->revents = fo_poll(fp, fd->events, td->td_ucred, td);
1518 if (fd->revents != 0)
1519 n++;
1520 }
1521 FILEDESC_SUNLOCK(fdp);
1522 stp->st_flags = 0;
1523 td->td_retval[0] = n;
1524 return (0);
1525 }
1526
1527
1528 static int
1529 pollout(td, fds, ufds, nfd)
1530 struct thread *td;
1531 struct pollfd *fds;
1532 struct pollfd *ufds;
1533 u_int nfd;
1534 {
1535 int error = 0;
1536 u_int i = 0;
1537 u_int n = 0;
1538
1539 for (i = 0; i < nfd; i++) {
1540 error = copyout(&fds->revents, &ufds->revents,
1541 sizeof(ufds->revents));
1542 if (error)
1543 return (error);
1544 if (fds->revents != 0)
1545 n++;
1546 fds++;
1547 ufds++;
1548 }
1549 td->td_retval[0] = n;
1550 return (0);
1551 }
1552
1553 static int
1554 pollscan(td, fds, nfd)
1555 struct thread *td;
1556 struct pollfd *fds;
1557 u_int nfd;
1558 {
1559 struct filedesc *fdp = td->td_proc->p_fd;
1560 struct file *fp;
1561 #ifdef CAPABILITIES
1562 cap_rights_t rights;
1563 #endif
1564 int i, n = 0;
1565
1566 FILEDESC_SLOCK(fdp);
1567 for (i = 0; i < nfd; i++, fds++) {
1568 if (fds->fd > fdp->fd_lastfile) {
1569 fds->revents = POLLNVAL;
1570 n++;
1571 } else if (fds->fd < 0) {
1572 fds->revents = 0;
1573 } else {
1574 fp = fdp->fd_ofiles[fds->fd].fde_file;
1575 #ifdef CAPABILITIES
1576 if (fp == NULL ||
1577 cap_check(cap_rights(fdp, fds->fd),
1578 cap_rights_init(&rights, CAP_EVENT)) != 0)
1579 #else
1580 if (fp == NULL)
1581 #endif
1582 {
1583 fds->revents = POLLNVAL;
1584 n++;
1585 } else {
1586 /*
1587 * Note: backend also returns POLLHUP and
1588 * POLLERR if appropriate.
1589 */
1590 selfdalloc(td, fds);
1591 fds->revents = fo_poll(fp, fds->events,
1592 td->td_ucred, td);
1593 /*
1594 * POSIX requires POLLOUT to be never
1595 * set simultaneously with POLLHUP.
1596 */
1597 if ((fds->revents & POLLHUP) != 0)
1598 fds->revents &= ~POLLOUT;
1599
1600 if (fds->revents != 0)
1601 n++;
1602 }
1603 }
1604 }
1605 FILEDESC_SUNLOCK(fdp);
1606 td->td_retval[0] = n;
1607 return (0);
1608 }
1609
1610 /*
1611 * OpenBSD poll system call.
1612 *
1613 * XXX this isn't quite a true representation... OpenBSD uses select ops.
1614 */
1615 #ifndef _SYS_SYSPROTO_H_
1616 struct openbsd_poll_args {
1617 struct pollfd *fds;
1618 u_int nfds;
1619 int timeout;
1620 };
1621 #endif
1622 int
1623 sys_openbsd_poll(td, uap)
1624 register struct thread *td;
1625 register struct openbsd_poll_args *uap;
1626 {
1627 return (sys_poll(td, (struct poll_args *)uap));
1628 }
1629
1630 /*
1631 * XXX This was created specifically to support netncp and netsmb. This
1632 * allows the caller to specify a socket to wait for events on. It returns
1633 * 0 if any events matched and an error otherwise. There is no way to
1634 * determine which events fired.
1635 */
1636 int
1637 selsocket(struct socket *so, int events, struct timeval *tvp, struct thread *td)
1638 {
1639 struct timeval rtv;
1640 sbintime_t asbt, precision, rsbt;
1641 int error;
1642
1643 precision = 0; /* stupid gcc! */
1644 if (tvp != NULL) {
1645 rtv = *tvp;
1646 if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
1647 rtv.tv_usec >= 1000000)
1648 return (EINVAL);
1649 if (!timevalisset(&rtv))
1650 asbt = 0;
1651 else if (rtv.tv_sec <= INT32_MAX) {
1652 rsbt = tvtosbt(rtv);
1653 precision = rsbt;
1654 precision >>= tc_precexp;
1655 if (TIMESEL(&asbt, rsbt))
1656 asbt += tc_tick_sbt;
1657 if (asbt <= SBT_MAX - rsbt)
1658 asbt += rsbt;
1659 else
1660 asbt = -1;
1661 } else
1662 asbt = -1;
1663 } else
1664 asbt = -1;
1665 seltdinit(td);
1666 /*
1667 * Iterate until the timeout expires or the socket becomes ready.
1668 */
1669 for (;;) {
1670 selfdalloc(td, NULL);
1671 error = sopoll(so, events, NULL, td);
1672 /* error here is actually the ready events. */
1673 if (error)
1674 return (0);
1675 error = seltdwait(td, asbt, precision);
1676 if (error)
1677 break;
1678 }
1679 seltdclear(td);
1680 /* XXX Duplicates ncp/smb behavior. */
1681 if (error == ERESTART)
1682 error = 0;
1683 return (error);
1684 }
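/*
 * A minimal in-kernel sketch of using selsocket() above, assuming the
 * caller holds a reference on a connected socket `so'; the name
 * wait_for_data() is a placeholder.  A zero return means an event
 * matched, EWOULDBLOCK means the five second timeout expired.
 */
#if 0
static int
wait_for_data(struct socket *so, struct thread *td)
{
        struct timeval tv;

        tv.tv_sec = 5;
        tv.tv_usec = 0;
        return (selsocket(so, POLLIN | POLLRDNORM, &tv, td));
}
#endif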
1685
1686 /*
1687 * Preallocate two selfds associated with 'cookie'. Some fo_poll routines
1688 * have two select sets, one for read and another for write.
1689 */
1690 static void
1691 selfdalloc(struct thread *td, void *cookie)
1692 {
1693 struct seltd *stp;
1694
1695 stp = td->td_sel;
1696 if (stp->st_free1 == NULL)
1697 stp->st_free1 = uma_zalloc(selfd_zone, M_WAITOK|M_ZERO);
1698 stp->st_free1->sf_td = stp;
1699 stp->st_free1->sf_cookie = cookie;
1700 if (stp->st_free2 == NULL)
1701 stp->st_free2 = uma_zalloc(selfd_zone, M_WAITOK|M_ZERO);
1702 stp->st_free2->sf_td = stp;
1703 stp->st_free2->sf_cookie = cookie;
1704 }
1705
1706 static void
1707 selfdfree(struct seltd *stp, struct selfd *sfp)
1708 {
1709 STAILQ_REMOVE(&stp->st_selq, sfp, selfd, sf_link);
1710 if (sfp->sf_si != NULL) {
1711 mtx_lock(sfp->sf_mtx);
1712 if (sfp->sf_si != NULL) {
1713 TAILQ_REMOVE(&sfp->sf_si->si_tdlist, sfp, sf_threads);
1714 refcount_release(&sfp->sf_refs);
1715 }
1716 mtx_unlock(sfp->sf_mtx);
1717 }
1718 if (refcount_release(&sfp->sf_refs))
1719 uma_zfree(selfd_zone, sfp);
1720 }
1721
1722 /* Drain the waiters tied to all the selfds belonging to the specified selinfo. */
1723 void
1724 seldrain(sip)
1725 struct selinfo *sip;
1726 {
1727
1728 /*
1729 * This functionality is already provided by doselwakeup(), so it is
1730 * enough to call it here.
1731 * Eventually the caller should take care to avoid races between a
1732 * thread calling select()/poll() and the file descriptor being
1733 * detached, but, again, the races are just the same as for
1734 * selwakeup().
1735 */
1736 doselwakeup(sip, -1);
1737 }
1738
1739 /*
1740 * Record a select request.
1741 */
1742 void
1743 selrecord(selector, sip)
1744 struct thread *selector;
1745 struct selinfo *sip;
1746 {
1747 struct selfd *sfp;
1748 struct seltd *stp;
1749 struct mtx *mtxp;
1750
1751 stp = selector->td_sel;
1752 /*
1753 * Don't record when doing a rescan.
1754 */
1755 if (stp->st_flags & SELTD_RESCAN)
1756 return;
1757 /*
1758 * Grab one of the preallocated descriptors.
1759 */
1760 sfp = NULL;
1761 if ((sfp = stp->st_free1) != NULL)
1762 stp->st_free1 = NULL;
1763 else if ((sfp = stp->st_free2) != NULL)
1764 stp->st_free2 = NULL;
1765 else
1766 panic("selrecord: No free selfd on selq");
1767 mtxp = sip->si_mtx;
1768 if (mtxp == NULL)
1769 mtxp = mtx_pool_find(mtxpool_select, sip);
1770 /*
1771 * Initialize the sfp and queue it in the thread.
1772 */
1773 sfp->sf_si = sip;
1774 sfp->sf_mtx = mtxp;
1775 refcount_init(&sfp->sf_refs, 2);
1776 STAILQ_INSERT_TAIL(&stp->st_selq, sfp, sf_link);
1777 /*
1778 * Now that we've locked the sip, check for initialization.
1779 */
1780 mtx_lock(mtxp);
1781 if (sip->si_mtx == NULL) {
1782 sip->si_mtx = mtxp;
1783 TAILQ_INIT(&sip->si_tdlist);
1784 }
1785 /*
1786 * Add this thread to the list of selfds listening on this selinfo.
1787 */
1788 TAILQ_INSERT_TAIL(&sip->si_tdlist, sfp, sf_threads);
1789 mtx_unlock(sip->si_mtx);
1790 }
1791
1792 /* Wake up a selecting thread. */
1793 void
1794 selwakeup(sip)
1795 struct selinfo *sip;
1796 {
1797 doselwakeup(sip, -1);
1798 }
1799
1800 /* Wake up a selecting thread, and set its priority. */
1801 void
1802 selwakeuppri(sip, pri)
1803 struct selinfo *sip;
1804 int pri;
1805 {
1806 doselwakeup(sip, pri);
1807 }
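/*
 * A condensed sketch (not part of this file) of the pairing that
 * selrecord()/selwakeup() expect from a driver: the poll method records
 * the thread while nothing is ready, and the input path wakes it up once
 * data arrives.  struct foo_softc and the foo_* names are placeholders;
 * a real driver would also need <sys/conf.h> for struct cdev.
 */
#if 0
struct foo_softc {
        struct mtx      sc_mtx;
        struct selinfo  sc_rsel;
        int             sc_ready;
};

static int
foo_poll(struct cdev *dev, int events, struct thread *td)
{
        struct foo_softc *sc = dev->si_drv1;
        int revents = 0;

        mtx_lock(&sc->sc_mtx);
        if (events & (POLLIN | POLLRDNORM)) {
                if (sc->sc_ready)
                        revents |= events & (POLLIN | POLLRDNORM);
                else
                        selrecord(td, &sc->sc_rsel);
        }
        mtx_unlock(&sc->sc_mtx);
        return (revents);
}

static void
foo_input(struct foo_softc *sc)
{
        mtx_lock(&sc->sc_mtx);
        sc->sc_ready = 1;
        selwakeup(&sc->sc_rsel);
        mtx_unlock(&sc->sc_mtx);
}
#endif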
1808
1809 /*
1810 * Do a wakeup when a selectable event occurs.
1811 */
1812 static void
1813 doselwakeup(sip, pri)
1814 struct selinfo *sip;
1815 int pri;
1816 {
1817 struct selfd *sfp;
1818 struct selfd *sfn;
1819 struct seltd *stp;
1820
1821 /* If it's not initialized there can't be any waiters. */
1822 if (sip->si_mtx == NULL)
1823 return;
1824 /*
1825 * Locking the selinfo locks all selfds associated with it.
1826 */
1827 mtx_lock(sip->si_mtx);
1828 TAILQ_FOREACH_SAFE(sfp, &sip->si_tdlist, sf_threads, sfn) {
1829 /*
1830 * Once we remove this sfp from the list and clear the
1831 * sf_si, seltdclear will know to ignore this si.
1832 */
1833 TAILQ_REMOVE(&sip->si_tdlist, sfp, sf_threads);
1834 sfp->sf_si = NULL;
1835 stp = sfp->sf_td;
1836 mtx_lock(&stp->st_mtx);
1837 stp->st_flags |= SELTD_PENDING;
1838 cv_broadcastpri(&stp->st_wait, pri);
1839 mtx_unlock(&stp->st_mtx);
1840 if (refcount_release(&sfp->sf_refs))
1841 uma_zfree(selfd_zone, sfp);
1842 }
1843 mtx_unlock(sip->si_mtx);
1844 }
1845
1846 static void
1847 seltdinit(struct thread *td)
1848 {
1849 struct seltd *stp;
1850
1851 if ((stp = td->td_sel) != NULL)
1852 goto out;
1853 td->td_sel = stp = malloc(sizeof(*stp), M_SELECT, M_WAITOK|M_ZERO);
1854 mtx_init(&stp->st_mtx, "sellck", NULL, MTX_DEF);
1855 cv_init(&stp->st_wait, "select");
1856 out:
1857 stp->st_flags = 0;
1858 STAILQ_INIT(&stp->st_selq);
1859 }
1860
1861 static int
1862 seltdwait(struct thread *td, sbintime_t sbt, sbintime_t precision)
1863 {
1864 struct seltd *stp;
1865 int error;
1866
1867 stp = td->td_sel;
1868 /*
1869 * An event of interest may occur while we do not hold the seltd
1870 * locked, so check the pending flag before we sleep.
1871 */
1872 mtx_lock(&stp->st_mtx);
1873 /*
1874 * Any further calls to selrecord will be a rescan.
1875 */
1876 stp->st_flags |= SELTD_RESCAN;
1877 if (stp->st_flags & SELTD_PENDING) {
1878 mtx_unlock(&stp->st_mtx);
1879 return (0);
1880 }
1881 if (sbt == 0)
1882 error = EWOULDBLOCK;
1883 else if (sbt != -1)
1884 error = cv_timedwait_sig_sbt(&stp->st_wait, &stp->st_mtx,
1885 sbt, precision, C_ABSOLUTE);
1886 else
1887 error = cv_wait_sig(&stp->st_wait, &stp->st_mtx);
1888 mtx_unlock(&stp->st_mtx);
1889
1890 return (error);
1891 }
1892
1893 void
1894 seltdfini(struct thread *td)
1895 {
1896 struct seltd *stp;
1897
1898 stp = td->td_sel;
1899 if (stp == NULL)
1900 return;
1901 if (stp->st_free1)
1902 uma_zfree(selfd_zone, stp->st_free1);
1903 if (stp->st_free2)
1904 uma_zfree(selfd_zone, stp->st_free2);
1905 td->td_sel = NULL;
1906 cv_destroy(&stp->st_wait);
1907 mtx_destroy(&stp->st_mtx);
1908 free(stp, M_SELECT);
1909 }
1910
1911 /*
1912 * Remove the references to the thread from all of the objects we were
1913 * polling.
1914 */
1915 static void
1916 seltdclear(struct thread *td)
1917 {
1918 struct seltd *stp;
1919 struct selfd *sfp;
1920 struct selfd *sfn;
1921
1922 stp = td->td_sel;
1923 STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn)
1924 selfdfree(stp, sfp);
1925 stp->st_flags = 0;
1926 }
1927
1928 static void selectinit(void *);
1929 SYSINIT(select, SI_SUB_SYSCALLS, SI_ORDER_ANY, selectinit, NULL);
1930 static void
1931 selectinit(void *dummy __unused)
1932 {
1933
1934 selfd_zone = uma_zcreate("selfd", sizeof(struct selfd), NULL, NULL,
1935 NULL, NULL, UMA_ALIGN_PTR, 0);
1936 mtxpool_select = mtx_pool_create("select mtxpool", 128, MTX_DEF);
1937 }
1938
1939 /*
1940 * Set up a syscall return value that follows the convention specified for
1941 * posix_* functions.
1942 */
1943 int
1944 kern_posix_error(struct thread *td, int error)
1945 {
1946
1947 if (error <= 0)
1948 return (error);
1949 td->td_errno = error;
1950 td->td_pflags |= TDP_NERRNO;
1951 td->td_retval[0] = error;
1952 return (0);
1953 }
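/*
 * A minimal sketch of the intended use: a posix_*-style syscall reports
 * failure through its positive return value rather than through errno,
 * so its implementation funnels the result through kern_posix_error().
 * The posix_frobnicate names below are hypothetical.
 */
#if 0
int
sys_posix_frobnicate(struct thread *td, struct posix_frobnicate_args *uap)
{
        int error;

        error = kern_frobnicate(td, uap->fd);
        return (kern_posix_error(td, error));
}
#endif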