1 /*-
2 * Copyright (c) 1982, 1986, 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)sys_generic.c 8.5 (Berkeley) 1/21/94
35 */
36
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD: releng/10.4/sys/kern/sys_generic.c 315481 2017-03-18 12:39:24Z mmokhi $");
39
40 #include "opt_capsicum.h"
41 #include "opt_compat.h"
42 #include "opt_ktrace.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/sysproto.h>
47 #include <sys/capsicum.h>
48 #include <sys/filedesc.h>
49 #include <sys/filio.h>
50 #include <sys/fcntl.h>
51 #include <sys/file.h>
52 #include <sys/lock.h>
53 #include <sys/proc.h>
54 #include <sys/signalvar.h>
55 #include <sys/socketvar.h>
56 #include <sys/uio.h>
57 #include <sys/kernel.h>
58 #include <sys/ktr.h>
59 #include <sys/limits.h>
60 #include <sys/malloc.h>
61 #include <sys/poll.h>
62 #include <sys/resourcevar.h>
63 #include <sys/selinfo.h>
64 #include <sys/sleepqueue.h>
65 #include <sys/syscallsubr.h>
66 #include <sys/sysctl.h>
67 #include <sys/sysent.h>
68 #include <sys/vnode.h>
69 #include <sys/bio.h>
70 #include <sys/buf.h>
71 #include <sys/condvar.h>
72 #ifdef KTRACE
73 #include <sys/ktrace.h>
74 #endif
75
76 #include <security/audit/audit.h>
77
78 /*
79 * The following macro defines how many bytes will be allocated on
80 * the stack, instead of from malloc(9), when passing IOCTL data
81 * structures between userspace and the kernel. Some IOCTLs with
82 * small data structures are issued very frequently, and this small
83 * on-stack buffer gives a significant speedup for those requests.
84 * The value of this define should be greater than or equal to
85 * 64 bytes and should also be a power of two. The data buffer is
86 * currently hard-aligned to an 8-byte boundary on the stack, which
87 * should be sufficient for all supported platforms.
88 */
89 #define SYS_IOCTL_SMALL_SIZE 128 /* bytes */
90 #define SYS_IOCTL_SMALL_ALIGN 8 /* bytes */
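
/*
 * Userspace illustration (not part of the build, hypothetical fd):
 * FIONREAD copies a single int out of the kernel, so requests like it
 * are staged in an on-stack buffer of SYS_IOCTL_SMALL_SIZE bytes in
 * sys_ioctl() below instead of a malloc(9) allocation.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/filio.h>
 *
 *	int nread;
 *
 *	if (ioctl(fd, FIONREAD, &nread) == 0)
 *		printf("%d bytes ready to read\n", nread);
 */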
91
92 int iosize_max_clamp = 1;
93 SYSCTL_INT(_debug, OID_AUTO, iosize_max_clamp, CTLFLAG_RW,
94 &iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX");
95 int devfs_iosize_max_clamp = 1;
96 SYSCTL_INT(_debug, OID_AUTO, devfs_iosize_max_clamp, CTLFLAG_RW,
97 &devfs_iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX for devices");
98
99 /*
100 * Assert that the return value of read(2) and write(2) syscalls fits
101 * into a register. If not, an architecture will need to provide the
102 * usermode wrappers to reconstruct the result.
103 */
104 CTASSERT(sizeof(register_t) >= sizeof(size_t));
105
106 static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
107 static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
108 MALLOC_DEFINE(M_IOV, "iov", "large iov's");
109
110 static int pollout(struct thread *, struct pollfd *, struct pollfd *,
111 u_int);
112 static int pollscan(struct thread *, struct pollfd *, u_int);
113 static int pollrescan(struct thread *);
114 static int selscan(struct thread *, fd_mask **, fd_mask **, int);
115 static int selrescan(struct thread *, fd_mask **, fd_mask **);
116 static void selfdalloc(struct thread *, void *);
117 static void selfdfree(struct seltd *, struct selfd *);
118 static int dofileread(struct thread *, int, struct file *, struct uio *,
119 off_t, int);
120 static int dofilewrite(struct thread *, int, struct file *, struct uio *,
121 off_t, int);
122 static void doselwakeup(struct selinfo *, int);
123 static void seltdinit(struct thread *);
124 static int seltdwait(struct thread *, sbintime_t, sbintime_t);
125 static void seltdclear(struct thread *);
126
127 /*
128 * One seltd per thread, allocated on demand.
129 *
130 * t - protected by st_mtx
131 * k - Only accessed by curthread or read-only
132 */
133 struct seltd {
134 STAILQ_HEAD(, selfd) st_selq; /* (k) List of selfds. */
135 struct selfd *st_free1; /* (k) free fd for read set. */
136 struct selfd *st_free2; /* (k) free fd for write set. */
137 struct mtx st_mtx; /* Protects struct seltd */
138 struct cv st_wait; /* (t) Wait channel. */
139 int st_flags; /* (t) SELTD_ flags. */
140 };
141
142 #define SELTD_PENDING 0x0001 /* We have pending events. */
143 #define SELTD_RESCAN 0x0002 /* Doing a rescan. */
144
145 /*
146 * One selfd allocated per-thread per-file-descriptor.
147 * f - protected by sf_mtx
148 */
149 struct selfd {
150 STAILQ_ENTRY(selfd) sf_link; /* (k) fds owned by this td. */
151 TAILQ_ENTRY(selfd) sf_threads; /* (f) fds on this selinfo. */
152 struct selinfo *sf_si; /* (f) selinfo when linked. */
153 struct mtx *sf_mtx; /* Pointer to selinfo mtx. */
154 struct seltd *sf_td; /* (k) owning seltd. */
155 void *sf_cookie; /* (k) fd or pollfd. */
156 };
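
/*
 * How the pieces fit together (informal sketch): each thread that
 * sleeps in select()/poll() owns one seltd.  For every descriptor it
 * waits on, one selfd is linked both into the thread's st_selq (via
 * sf_link) and into that descriptor's selinfo si_tdlist (via
 * sf_threads).  selrecord() builds these links; doselwakeup() walks
 * si_tdlist, clears sf_si and broadcasts st_wait to wake the owners.
 *
 *	seltd (per thread) <-sf_td- selfd -sf_si-> selinfo (per object)
 */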
157
158 static uma_zone_t selfd_zone;
159 static struct mtx_pool *mtxpool_select;
160
161 #ifndef _SYS_SYSPROTO_H_
162 struct read_args {
163 int fd;
164 void *buf;
165 size_t nbyte;
166 };
167 #endif
168 int
169 sys_read(td, uap)
170 struct thread *td;
171 struct read_args *uap;
172 {
173 struct uio auio;
174 struct iovec aiov;
175 int error;
176
177 if (uap->nbyte > IOSIZE_MAX)
178 return (EINVAL);
179 aiov.iov_base = uap->buf;
180 aiov.iov_len = uap->nbyte;
181 auio.uio_iov = &aiov;
182 auio.uio_iovcnt = 1;
183 auio.uio_resid = uap->nbyte;
184 auio.uio_segflg = UIO_USERSPACE;
185 error = kern_readv(td, uap->fd, &auio);
186 return(error);
187 }
188
189 /*
190 * Positioned read system call
191 */
192 #ifndef _SYS_SYSPROTO_H_
193 struct pread_args {
194 int fd;
195 void *buf;
196 size_t nbyte;
197 int pad;
198 off_t offset;
199 };
200 #endif
201 int
202 sys_pread(td, uap)
203 struct thread *td;
204 struct pread_args *uap;
205 {
206 struct uio auio;
207 struct iovec aiov;
208 int error;
209
210 if (uap->nbyte > IOSIZE_MAX)
211 return (EINVAL);
212 aiov.iov_base = uap->buf;
213 aiov.iov_len = uap->nbyte;
214 auio.uio_iov = &aiov;
215 auio.uio_iovcnt = 1;
216 auio.uio_resid = uap->nbyte;
217 auio.uio_segflg = UIO_USERSPACE;
218 error = kern_preadv(td, uap->fd, &auio, uap->offset);
219 return(error);
220 }
221
222 int
223 freebsd6_pread(td, uap)
224 struct thread *td;
225 struct freebsd6_pread_args *uap;
226 {
227 struct pread_args oargs;
228
229 oargs.fd = uap->fd;
230 oargs.buf = uap->buf;
231 oargs.nbyte = uap->nbyte;
232 oargs.offset = uap->offset;
233 return (sys_pread(td, &oargs));
234 }
235
236 /*
237 * Scatter read system call.
238 */
239 #ifndef _SYS_SYSPROTO_H_
240 struct readv_args {
241 int fd;
242 struct iovec *iovp;
243 u_int iovcnt;
244 };
245 #endif
246 int
247 sys_readv(struct thread *td, struct readv_args *uap)
248 {
249 struct uio *auio;
250 int error;
251
252 error = copyinuio(uap->iovp, uap->iovcnt, &auio);
253 if (error)
254 return (error);
255 error = kern_readv(td, uap->fd, auio);
256 free(auio, M_IOV);
257 return (error);
258 }
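
/*
 * Minimal userspace sketch of a scatter read (illustrative only,
 * hypothetical descriptor fd): one readv(2) call fills a fixed header
 * and a separate payload buffer.
 *
 *	#include <sys/uio.h>
 *
 *	char hdr[16], payload[4096];
 *	struct iovec iov[2] = {
 *		{ .iov_base = hdr,     .iov_len = sizeof(hdr) },
 *		{ .iov_base = payload, .iov_len = sizeof(payload) },
 *	};
 *	ssize_t n = readv(fd, iov, 2);
 */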
259
260 int
261 kern_readv(struct thread *td, int fd, struct uio *auio)
262 {
263 struct file *fp;
264 cap_rights_t rights;
265 int error;
266
267 error = fget_read(td, fd, cap_rights_init(&rights, CAP_READ), &fp);
268 if (error)
269 return (error);
270 error = dofileread(td, fd, fp, auio, (off_t)-1, 0);
271 fdrop(fp, td);
272 return (error);
273 }
274
275 /*
276 * Scatter positioned read system call.
277 */
278 #ifndef _SYS_SYSPROTO_H_
279 struct preadv_args {
280 int fd;
281 struct iovec *iovp;
282 u_int iovcnt;
283 off_t offset;
284 };
285 #endif
286 int
287 sys_preadv(struct thread *td, struct preadv_args *uap)
288 {
289 struct uio *auio;
290 int error;
291
292 error = copyinuio(uap->iovp, uap->iovcnt, &auio);
293 if (error)
294 return (error);
295 error = kern_preadv(td, uap->fd, auio, uap->offset);
296 free(auio, M_IOV);
297 return (error);
298 }
299
300 int
301 kern_preadv(td, fd, auio, offset)
302 struct thread *td;
303 int fd;
304 struct uio *auio;
305 off_t offset;
306 {
307 struct file *fp;
308 cap_rights_t rights;
309 int error;
310
311 error = fget_read(td, fd, cap_rights_init(&rights, CAP_PREAD), &fp);
312 if (error)
313 return (error);
314 if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
315 error = ESPIPE;
316 else if (offset < 0 &&
317 (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
318 error = EINVAL;
319 else
320 error = dofileread(td, fd, fp, auio, offset, FOF_OFFSET);
321 fdrop(fp, td);
322 return (error);
323 }
324
325 /*
326 * Common code for readv and preadv that reads data in
327 * from a file using the passed in uio, offset, and flags.
328 */
329 static int
330 dofileread(td, fd, fp, auio, offset, flags)
331 struct thread *td;
332 int fd;
333 struct file *fp;
334 struct uio *auio;
335 off_t offset;
336 int flags;
337 {
338 ssize_t cnt;
339 int error;
340 #ifdef KTRACE
341 struct uio *ktruio = NULL;
342 #endif
343
344 /* Finish zero length reads right here */
345 if (auio->uio_resid == 0) {
346 td->td_retval[0] = 0;
347 return(0);
348 }
349 auio->uio_rw = UIO_READ;
350 auio->uio_offset = offset;
351 auio->uio_td = td;
352 #ifdef KTRACE
353 if (KTRPOINT(td, KTR_GENIO))
354 ktruio = cloneuio(auio);
355 #endif
356 cnt = auio->uio_resid;
357 if ((error = fo_read(fp, auio, td->td_ucred, flags, td))) {
358 if (auio->uio_resid != cnt && (error == ERESTART ||
359 error == EINTR || error == EWOULDBLOCK))
360 error = 0;
361 }
362 cnt -= auio->uio_resid;
363 #ifdef KTRACE
364 if (ktruio != NULL) {
365 ktruio->uio_resid = cnt;
366 ktrgenio(fd, UIO_READ, ktruio, error);
367 }
368 #endif
369 td->td_retval[0] = cnt;
370 return (error);
371 }
372
373 #ifndef _SYS_SYSPROTO_H_
374 struct write_args {
375 int fd;
376 const void *buf;
377 size_t nbyte;
378 };
379 #endif
380 int
381 sys_write(td, uap)
382 struct thread *td;
383 struct write_args *uap;
384 {
385 struct uio auio;
386 struct iovec aiov;
387 int error;
388
389 if (uap->nbyte > IOSIZE_MAX)
390 return (EINVAL);
391 aiov.iov_base = (void *)(uintptr_t)uap->buf;
392 aiov.iov_len = uap->nbyte;
393 auio.uio_iov = &aiov;
394 auio.uio_iovcnt = 1;
395 auio.uio_resid = uap->nbyte;
396 auio.uio_segflg = UIO_USERSPACE;
397 error = kern_writev(td, uap->fd, &auio);
398 return(error);
399 }
400
401 /*
402 * Positioned write system call.
403 */
404 #ifndef _SYS_SYSPROTO_H_
405 struct pwrite_args {
406 int fd;
407 const void *buf;
408 size_t nbyte;
409 int pad;
410 off_t offset;
411 };
412 #endif
413 int
414 sys_pwrite(td, uap)
415 struct thread *td;
416 struct pwrite_args *uap;
417 {
418 struct uio auio;
419 struct iovec aiov;
420 int error;
421
422 if (uap->nbyte > IOSIZE_MAX)
423 return (EINVAL);
424 aiov.iov_base = (void *)(uintptr_t)uap->buf;
425 aiov.iov_len = uap->nbyte;
426 auio.uio_iov = &aiov;
427 auio.uio_iovcnt = 1;
428 auio.uio_resid = uap->nbyte;
429 auio.uio_segflg = UIO_USERSPACE;
430 error = kern_pwritev(td, uap->fd, &auio, uap->offset);
431 return(error);
432 }
433
434 int
435 freebsd6_pwrite(td, uap)
436 struct thread *td;
437 struct freebsd6_pwrite_args *uap;
438 {
439 struct pwrite_args oargs;
440
441 oargs.fd = uap->fd;
442 oargs.buf = uap->buf;
443 oargs.nbyte = uap->nbyte;
444 oargs.offset = uap->offset;
445 return (sys_pwrite(td, &oargs));
446 }
447
448 /*
449 * Gather write system call.
450 */
451 #ifndef _SYS_SYSPROTO_H_
452 struct writev_args {
453 int fd;
454 struct iovec *iovp;
455 u_int iovcnt;
456 };
457 #endif
458 int
459 sys_writev(struct thread *td, struct writev_args *uap)
460 {
461 struct uio *auio;
462 int error;
463
464 error = copyinuio(uap->iovp, uap->iovcnt, &auio);
465 if (error)
466 return (error);
467 error = kern_writev(td, uap->fd, auio);
468 free(auio, M_IOV);
469 return (error);
470 }
471
472 int
473 kern_writev(struct thread *td, int fd, struct uio *auio)
474 {
475 struct file *fp;
476 cap_rights_t rights;
477 int error;
478
479 error = fget_write(td, fd, cap_rights_init(&rights, CAP_WRITE), &fp);
480 if (error)
481 return (error);
482 error = dofilewrite(td, fd, fp, auio, (off_t)-1, 0);
483 fdrop(fp, td);
484 return (error);
485 }
486
487 /*
488 * Gather positioned write system call.
489 */
490 #ifndef _SYS_SYSPROTO_H_
491 struct pwritev_args {
492 int fd;
493 struct iovec *iovp;
494 u_int iovcnt;
495 off_t offset;
496 };
497 #endif
498 int
499 sys_pwritev(struct thread *td, struct pwritev_args *uap)
500 {
501 struct uio *auio;
502 int error;
503
504 error = copyinuio(uap->iovp, uap->iovcnt, &auio);
505 if (error)
506 return (error);
507 error = kern_pwritev(td, uap->fd, auio, uap->offset);
508 free(auio, M_IOV);
509 return (error);
510 }
511
512 int
513 kern_pwritev(td, fd, auio, offset)
514 struct thread *td;
515 struct uio *auio;
516 int fd;
517 off_t offset;
518 {
519 struct file *fp;
520 cap_rights_t rights;
521 int error;
522
523 error = fget_write(td, fd, cap_rights_init(&rights, CAP_PWRITE), &fp);
524 if (error)
525 return (error);
526 if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
527 error = ESPIPE;
528 else if (offset < 0 &&
529 (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
530 error = EINVAL;
531 else
532 error = dofilewrite(td, fd, fp, auio, offset, FOF_OFFSET);
533 fdrop(fp, td);
534 return (error);
535 }
536
537 /*
538 * Common code for writev and pwritev that writes data to
539 * a file using the passed in uio, offset, and flags.
540 */
541 static int
542 dofilewrite(td, fd, fp, auio, offset, flags)
543 struct thread *td;
544 int fd;
545 struct file *fp;
546 struct uio *auio;
547 off_t offset;
548 int flags;
549 {
550 ssize_t cnt;
551 int error;
552 #ifdef KTRACE
553 struct uio *ktruio = NULL;
554 #endif
555
556 auio->uio_rw = UIO_WRITE;
557 auio->uio_td = td;
558 auio->uio_offset = offset;
559 #ifdef KTRACE
560 if (KTRPOINT(td, KTR_GENIO))
561 ktruio = cloneuio(auio);
562 #endif
563 cnt = auio->uio_resid;
564 if (fp->f_type == DTYPE_VNODE &&
565 (fp->f_vnread_flags & FDEVFS_VNODE) == 0)
566 bwillwrite();
567 if ((error = fo_write(fp, auio, td->td_ucred, flags, td))) {
568 if (auio->uio_resid != cnt && (error == ERESTART ||
569 error == EINTR || error == EWOULDBLOCK))
570 error = 0;
571 /* Socket layer is responsible for issuing SIGPIPE. */
572 if (fp->f_type != DTYPE_SOCKET && error == EPIPE) {
573 PROC_LOCK(td->td_proc);
574 tdsignal(td, SIGPIPE);
575 PROC_UNLOCK(td->td_proc);
576 }
577 }
578 cnt -= auio->uio_resid;
579 #ifdef KTRACE
580 if (ktruio != NULL) {
581 ktruio->uio_resid = cnt;
582 ktrgenio(fd, UIO_WRITE, ktruio, error);
583 }
584 #endif
585 td->td_retval[0] = cnt;
586 return (error);
587 }
588
589 /*
590 * Truncate a file given a file descriptor.
591 *
592 * Can't use fget_write() here, since we must return EINVAL and not EBADF
593 * if the descriptor isn't writable.
594 */
595 int
596 kern_ftruncate(td, fd, length)
597 struct thread *td;
598 int fd;
599 off_t length;
600 {
601 struct file *fp;
602 cap_rights_t rights;
603 int error;
604
605 AUDIT_ARG_FD(fd);
606 if (length < 0)
607 return (EINVAL);
608 error = fget(td, fd, cap_rights_init(&rights, CAP_FTRUNCATE), &fp);
609 if (error)
610 return (error);
611 AUDIT_ARG_FILE(td->td_proc, fp);
612 if (!(fp->f_flag & FWRITE)) {
613 fdrop(fp, td);
614 return (EINVAL);
615 }
616 error = fo_truncate(fp, length, td->td_ucred, td);
617 fdrop(fp, td);
618 return (error);
619 }
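
/*
 * Userspace illustration of the EINVAL-versus-EBADF distinction noted
 * above (hypothetical path, illustrative only): a valid but read-only
 * descriptor yields EINVAL, not EBADF.
 *	(needs <fcntl.h>, <unistd.h>, <errno.h>, <assert.h>)
 *
 *	int fd = open("/tmp/somefile", O_RDONLY);
 *
 *	if (ftruncate(fd, 0) == -1)
 *		assert(errno == EINVAL);
 */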
620
621 #ifndef _SYS_SYSPROTO_H_
622 struct ftruncate_args {
623 int fd;
624 int pad;
625 off_t length;
626 };
627 #endif
628 int
629 sys_ftruncate(td, uap)
630 struct thread *td;
631 struct ftruncate_args *uap;
632 {
633
634 return (kern_ftruncate(td, uap->fd, uap->length));
635 }
636
637 #if defined(COMPAT_43)
638 #ifndef _SYS_SYSPROTO_H_
639 struct oftruncate_args {
640 int fd;
641 long length;
642 };
643 #endif
644 int
645 oftruncate(td, uap)
646 struct thread *td;
647 struct oftruncate_args *uap;
648 {
649
650 return (kern_ftruncate(td, uap->fd, uap->length));
651 }
652 #endif /* COMPAT_43 */
653
654 #ifndef _SYS_SYSPROTO_H_
655 struct ioctl_args {
656 int fd;
657 u_long com;
658 caddr_t data;
659 };
660 #endif
661 /* ARGSUSED */
662 int
663 sys_ioctl(struct thread *td, struct ioctl_args *uap)
664 {
665 u_char smalldata[SYS_IOCTL_SMALL_SIZE] __aligned(SYS_IOCTL_SMALL_ALIGN);
666 u_long com;
667 int arg, error;
668 u_int size;
669 caddr_t data;
670
671 if (uap->com > 0xffffffff) {
672 printf(
673 "WARNING pid %d (%s): ioctl sign-extension ioctl %lx\n",
674 td->td_proc->p_pid, td->td_name, uap->com);
675 uap->com &= 0xffffffff;
676 }
677 com = uap->com;
678
679 /*
680 * Interpret high order word to find amount of data to be
681 * copied to/from the user's address space.
682 */
683 size = IOCPARM_LEN(com);
684 if ((size > IOCPARM_MAX) ||
685 ((com & (IOC_VOID | IOC_IN | IOC_OUT)) == 0) ||
686 #if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
687 ((com & IOC_OUT) && size == 0) ||
688 #else
689 ((com & (IOC_IN | IOC_OUT)) && size == 0) ||
690 #endif
691 ((com & IOC_VOID) && size > 0 && size != sizeof(int)))
692 return (ENOTTY);
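
/*
 * Worked example of the command-word decoding above (illustrative):
 * FIONREAD is _IOR('f', 127, int), so IOC_OUT is set and
 * IOCPARM_LEN(com) is sizeof(int).  The checks pass, the request fits
 * the on-stack smalldata buffer below, and the int is copied back out
 * to uap->data once the handler returns.
 */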
693
694 if (size > 0) {
695 if (com & IOC_VOID) {
696 /* Integer argument. */
697 arg = (intptr_t)uap->data;
698 data = (void *)&arg;
699 size = 0;
700 } else {
701 if (size > SYS_IOCTL_SMALL_SIZE)
702 data = malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
703 else
704 data = smalldata;
705 }
706 } else
707 data = (void *)&uap->data;
708 if (com & IOC_IN) {
709 error = copyin(uap->data, data, (u_int)size);
710 if (error != 0)
711 goto out;
712 } else if (com & IOC_OUT) {
713 /*
714 * Zero the buffer so the user always
715 * gets back something deterministic.
716 */
717 bzero(data, size);
718 }
719
720 error = kern_ioctl(td, uap->fd, com, data);
721
722 if (error == 0 && (com & IOC_OUT))
723 error = copyout(data, uap->data, (u_int)size);
724
725 out:
726 if (size > SYS_IOCTL_SMALL_SIZE)
727 free(data, M_IOCTLOPS);
728 return (error);
729 }
730
731 int
732 kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
733 {
734 struct file *fp;
735 struct filedesc *fdp;
736 #ifndef CAPABILITIES
737 cap_rights_t rights;
738 #endif
739 int error, tmp, locked;
740
741 AUDIT_ARG_FD(fd);
742 AUDIT_ARG_CMD(com);
743
744 fdp = td->td_proc->p_fd;
745
746 switch (com) {
747 case FIONCLEX:
748 case FIOCLEX:
749 FILEDESC_XLOCK(fdp);
750 locked = LA_XLOCKED;
751 break;
752 default:
753 #ifdef CAPABILITIES
754 FILEDESC_SLOCK(fdp);
755 locked = LA_SLOCKED;
756 #else
757 locked = LA_UNLOCKED;
758 #endif
759 break;
760 }
761
762 #ifdef CAPABILITIES
763 if ((fp = fget_locked(fdp, fd)) == NULL) {
764 error = EBADF;
765 goto out;
766 }
767 if ((error = cap_ioctl_check(fdp, fd, com)) != 0) {
768 fp = NULL; /* fhold() was not called yet */
769 goto out;
770 }
771 fhold(fp);
772 if (locked == LA_SLOCKED) {
773 FILEDESC_SUNLOCK(fdp);
774 locked = LA_UNLOCKED;
775 }
776 #else
777 error = fget(td, fd, cap_rights_init(&rights, CAP_IOCTL), &fp);
778 if (error != 0) {
779 fp = NULL;
780 goto out;
781 }
782 #endif
783 if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
784 error = EBADF;
785 goto out;
786 }
787
788 switch (com) {
789 case FIONCLEX:
790 fdp->fd_ofiles[fd].fde_flags &= ~UF_EXCLOSE;
791 goto out;
792 case FIOCLEX:
793 fdp->fd_ofiles[fd].fde_flags |= UF_EXCLOSE;
794 goto out;
795 case FIONBIO:
796 if ((tmp = *(int *)data))
797 atomic_set_int(&fp->f_flag, FNONBLOCK);
798 else
799 atomic_clear_int(&fp->f_flag, FNONBLOCK);
800 data = (void *)&tmp;
801 break;
802 case FIOASYNC:
803 if ((tmp = *(int *)data))
804 atomic_set_int(&fp->f_flag, FASYNC);
805 else
806 atomic_clear_int(&fp->f_flag, FASYNC);
807 data = (void *)&tmp;
808 break;
809 }
810
811 error = fo_ioctl(fp, com, data, td->td_ucred, td);
812 out:
813 switch (locked) {
814 case LA_XLOCKED:
815 FILEDESC_XUNLOCK(fdp);
816 break;
817 #ifdef CAPABILITIES
818 case LA_SLOCKED:
819 FILEDESC_SUNLOCK(fdp);
820 break;
821 #endif
822 default:
823 FILEDESC_UNLOCK_ASSERT(fdp);
824 break;
825 }
826 if (fp != NULL)
827 fdrop(fp, td);
828 return (error);
829 }
830
831 int
832 poll_no_poll(int events)
833 {
834 /*
835 * Return true for read/write. If the user asked for something
836 * special, return POLLNVAL, so that clients have a way of
837 * determining reliably whether or not the extended
838 * functionality is present without hard-coding knowledge
839 * of specific filesystem implementations.
840 */
841 if (events & ~POLLSTANDARD)
842 return (POLLNVAL);
843
844 return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
845 }
846
847 int
848 sys_pselect(struct thread *td, struct pselect_args *uap)
849 {
850 struct timespec ts;
851 struct timeval tv, *tvp;
852 sigset_t set, *uset;
853 int error;
854
855 if (uap->ts != NULL) {
856 error = copyin(uap->ts, &ts, sizeof(ts));
857 if (error != 0)
858 return (error);
859 TIMESPEC_TO_TIMEVAL(&tv, &ts);
860 tvp = &tv;
861 } else
862 tvp = NULL;
863 if (uap->sm != NULL) {
864 error = copyin(uap->sm, &set, sizeof(set));
865 if (error != 0)
866 return (error);
867 uset = &set;
868 } else
869 uset = NULL;
870 return (kern_pselect(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
871 uset, NFDBITS));
872 }
873
874 int
875 kern_pselect(struct thread *td, int nd, fd_set *in, fd_set *ou, fd_set *ex,
876 struct timeval *tvp, sigset_t *uset, int abi_nfdbits)
877 {
878 int error;
879
880 if (uset != NULL) {
881 error = kern_sigprocmask(td, SIG_SETMASK, uset,
882 &td->td_oldsigmask, 0);
883 if (error != 0)
884 return (error);
885 td->td_pflags |= TDP_OLDMASK;
886 /*
887 * Make sure that ast() is called on return to
888 * usermode and TDP_OLDMASK is cleared, restoring old
889 * sigmask.
890 */
891 thread_lock(td);
892 td->td_flags |= TDF_ASTPENDING;
893 thread_unlock(td);
894 }
895 error = kern_select(td, nd, in, ou, ex, tvp, abi_nfdbits);
896 return (error);
897 }
898
899 #ifndef _SYS_SYSPROTO_H_
900 struct select_args {
901 int nd;
902 fd_set *in, *ou, *ex;
903 struct timeval *tv;
904 };
905 #endif
906 int
907 sys_select(struct thread *td, struct select_args *uap)
908 {
909 struct timeval tv, *tvp;
910 int error;
911
912 if (uap->tv != NULL) {
913 error = copyin(uap->tv, &tv, sizeof(tv));
914 if (error)
915 return (error);
916 tvp = &tv;
917 } else
918 tvp = NULL;
919
920 return (kern_select(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
921 NFDBITS));
922 }
923
924 /*
925 * In the unlikely case when the user specified an n greater than the
926 * last open file descriptor, check that no bits are set after the
927 * last valid fd. We must return EBADF if any such bit is set.
928 *
929 * There are applications that rely on this behaviour.
930 *
931 * nd is fd_lastfile + 1.
932 */
933 static int
934 select_check_badfd(fd_set *fd_in, int nd, int ndu, int abi_nfdbits)
935 {
936 char *addr, *oaddr;
937 int b, i, res;
938 uint8_t bits;
939
940 if (nd >= ndu || fd_in == NULL)
941 return (0);
942
943 oaddr = NULL;
944 bits = 0; /* silence gcc */
945 for (i = nd; i < ndu; i++) {
946 b = i / NBBY;
947 #if BYTE_ORDER == LITTLE_ENDIAN
948 addr = (char *)fd_in + b;
949 #else
950 addr = (char *)fd_in;
951 if (abi_nfdbits == NFDBITS) {
952 addr += rounddown(b, sizeof(fd_mask)) +
953 sizeof(fd_mask) - 1 - b % sizeof(fd_mask);
954 } else {
955 addr += rounddown(b, sizeof(uint32_t)) +
956 sizeof(uint32_t) - 1 - b % sizeof(uint32_t);
957 }
958 #endif
959 if (addr != oaddr) {
960 res = fubyte(addr);
961 if (res == -1)
962 return (EFAULT);
963 oaddr = addr;
964 bits = res;
965 }
966 if ((bits & (1 << (i % NBBY))) != 0)
967 return (EBADF);
968 }
969 return (0);
970 }
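
/*
 * Worked example (illustrative): if the process's highest open
 * descriptor is 4, nd is clamped to 5 by kern_select(); a caller that
 * passed nd == 16 with, say, bit 9 set in one of its input sets names
 * a descriptor that cannot possibly be open, so the loop above
 * returns EBADF for it.
 */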
971
972 int
973 kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou,
974 fd_set *fd_ex, struct timeval *tvp, int abi_nfdbits)
975 {
976 struct filedesc *fdp;
977 /*
978 * The magic 2048 here is chosen to be just enough for FD_SETSIZE
979 * infds with the new FD_SETSIZE of 1024, and more than enough for
980 * FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE
981 * of 256.
982 */
983 fd_mask s_selbits[howmany(2048, NFDBITS)];
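/*
 * Worked numbers for the comment above (illustrative): 2048 bits is
 * 256 bytes of buffer.  One fd_set at FD_SETSIZE 1024 is 128 bytes
 * and each non-NULL set is double-buffered (input and output bits),
 * so a single set fits exactly; three sets at the old FD_SETSIZE of
 * 256 need 3 * 2 * 32 == 192 bytes, which also fits.
 */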
984 fd_mask *ibits[3], *obits[3], *selbits, *sbp;
985 struct timeval rtv;
986 sbintime_t asbt, precision, rsbt;
987 u_int nbufbytes, ncpbytes, ncpubytes, nfdbits;
988 int error, lf, ndu;
989
990 if (nd < 0)
991 return (EINVAL);
992 fdp = td->td_proc->p_fd;
993 ndu = nd;
994 lf = fdp->fd_lastfile;
995 if (nd > lf + 1)
996 nd = lf + 1;
997
998 error = select_check_badfd(fd_in, nd, ndu, abi_nfdbits);
999 if (error != 0)
1000 return (error);
1001 error = select_check_badfd(fd_ou, nd, ndu, abi_nfdbits);
1002 if (error != 0)
1003 return (error);
1004 error = select_check_badfd(fd_ex, nd, ndu, abi_nfdbits);
1005 if (error != 0)
1006 return (error);
1007
1008 /*
1009 * Allocate just enough bits for the non-null fd_sets. Use the
1010 * preallocated auto buffer if possible.
1011 */
1012 nfdbits = roundup(nd, NFDBITS);
1013 ncpbytes = nfdbits / NBBY;
1014 ncpubytes = roundup(nd, abi_nfdbits) / NBBY;
1015 nbufbytes = 0;
1016 if (fd_in != NULL)
1017 nbufbytes += 2 * ncpbytes;
1018 if (fd_ou != NULL)
1019 nbufbytes += 2 * ncpbytes;
1020 if (fd_ex != NULL)
1021 nbufbytes += 2 * ncpbytes;
1022 if (nbufbytes <= sizeof s_selbits)
1023 selbits = &s_selbits[0];
1024 else
1025 selbits = malloc(nbufbytes, M_SELECT, M_WAITOK);
1026
1027 /*
1028 * Assign pointers into the bit buffers and fetch the input bits.
1029 * Put the output buffers together so that they can be bzeroed
1030 * together.
1031 */
1032 sbp = selbits;
1033 #define getbits(name, x) \
1034 do { \
1035 if (name == NULL) { \
1036 ibits[x] = NULL; \
1037 obits[x] = NULL; \
1038 } else { \
1039 ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp; \
1040 obits[x] = sbp; \
1041 sbp += ncpbytes / sizeof *sbp; \
1042 error = copyin(name, ibits[x], ncpubytes); \
1043 if (error != 0) \
1044 goto done; \
1045 bzero((char *)ibits[x] + ncpubytes, \
1046 ncpbytes - ncpubytes); \
1047 } \
1048 } while (0)
1049 getbits(fd_in, 0);
1050 getbits(fd_ou, 1);
1051 getbits(fd_ex, 2);
1052 #undef getbits
1053
1054 #if BYTE_ORDER == BIG_ENDIAN && defined(__LP64__)
1055 /*
1056 * XXX: swizzle_fdset assumes that if abi_nfdbits != NFDBITS,
1057 * we are running under 32-bit emulation. This should be more
1058 * generic.
1059 */
1060 #define swizzle_fdset(bits) \
1061 if (abi_nfdbits != NFDBITS && bits != NULL) { \
1062 int i; \
1063 for (i = 0; i < ncpbytes / sizeof *sbp; i++) \
1064 bits[i] = (bits[i] >> 32) | (bits[i] << 32); \
1065 }
1066 #else
1067 #define swizzle_fdset(bits)
1068 #endif
1069
1070 /* Make sure the bit order makes it through an ABI transition */
1071 swizzle_fdset(ibits[0]);
1072 swizzle_fdset(ibits[1]);
1073 swizzle_fdset(ibits[2]);
1074
1075 if (nbufbytes != 0)
1076 bzero(selbits, nbufbytes / 2);
1077
1078 precision = 0;
1079 if (tvp != NULL) {
1080 rtv = *tvp;
1081 if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
1082 rtv.tv_usec >= 1000000) {
1083 error = EINVAL;
1084 goto done;
1085 }
1086 if (!timevalisset(&rtv))
1087 asbt = 0;
1088 else if (rtv.tv_sec <= INT32_MAX) {
1089 rsbt = tvtosbt(rtv);
1090 precision = rsbt;
1091 precision >>= tc_precexp;
1092 if (TIMESEL(&asbt, rsbt))
1093 asbt += tc_tick_sbt;
1094 if (asbt <= SBT_MAX - rsbt)
1095 asbt += rsbt;
1096 else
1097 asbt = -1;
1098 } else
1099 asbt = -1;
1100 } else
1101 asbt = -1;
1102 seltdinit(td);
1103 /* Iterate until the timeout expires or descriptors become ready. */
1104 for (;;) {
1105 error = selscan(td, ibits, obits, nd);
1106 if (error || td->td_retval[0] != 0)
1107 break;
1108 error = seltdwait(td, asbt, precision);
1109 if (error)
1110 break;
1111 error = selrescan(td, ibits, obits);
1112 if (error || td->td_retval[0] != 0)
1113 break;
1114 }
1115 seltdclear(td);
1116
1117 done:
1118 /* select is not restarted after signals... */
1119 if (error == ERESTART)
1120 error = EINTR;
1121 if (error == EWOULDBLOCK)
1122 error = 0;
1123
1124 /* swizzle bit order back, if necessary */
1125 swizzle_fdset(obits[0]);
1126 swizzle_fdset(obits[1]);
1127 swizzle_fdset(obits[2]);
1128 #undef swizzle_fdset
1129
1130 #define putbits(name, x) \
1131 if (name && (error2 = copyout(obits[x], name, ncpubytes))) \
1132 error = error2;
1133 if (error == 0) {
1134 int error2;
1135
1136 putbits(fd_in, 0);
1137 putbits(fd_ou, 1);
1138 putbits(fd_ex, 2);
1139 #undef putbits
1140 }
1141 if (selbits != &s_selbits[0])
1142 free(selbits, M_SELECT);
1143
1144 return (error);
1145 }
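
/*
 * Minimal userspace sketch of the path implemented above (illustrative
 * only, hypothetical descriptor fd): wait at most two seconds for fd
 * to become readable.
 *
 *	#include <sys/select.h>
 *
 *	fd_set rset;
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };
 *
 *	FD_ZERO(&rset);
 *	FD_SET(fd, &rset);
 *	if (select(fd + 1, &rset, NULL, NULL, &tv) > 0 &&
 *	    FD_ISSET(fd, &rset))
 *		;		// fd has data ready
 */
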
1146 /*
1147 * Convert a select bit set to poll flags.
1148 *
1149 * The backend always returns POLLHUP/POLLERR if appropriate and we
1150 * return this as a set bit in any set.
1151 */
1152 static int select_flags[3] = {
1153 POLLRDNORM | POLLHUP | POLLERR,
1154 POLLWRNORM | POLLHUP | POLLERR,
1155 POLLRDBAND | POLLERR
1156 };
1157
1158 /*
1159 * Compute the fo_poll flags required for a fd given by the index and
1160 * bit position in the fd_mask array.
1161 */
1162 static __inline int
1163 selflags(fd_mask **ibits, int idx, fd_mask bit)
1164 {
1165 int flags;
1166 int msk;
1167
1168 flags = 0;
1169 for (msk = 0; msk < 3; msk++) {
1170 if (ibits[msk] == NULL)
1171 continue;
1172 if ((ibits[msk][idx] & bit) == 0)
1173 continue;
1174 flags |= select_flags[msk];
1175 }
1176 return (flags);
1177 }
1178
1179 /*
1180 * Set the appropriate output bits given a mask of fired events and the
1181 * input bits originally requested.
1182 */
1183 static __inline int
1184 selsetbits(fd_mask **ibits, fd_mask **obits, int idx, fd_mask bit, int events)
1185 {
1186 int msk;
1187 int n;
1188
1189 n = 0;
1190 for (msk = 0; msk < 3; msk++) {
1191 if ((events & select_flags[msk]) == 0)
1192 continue;
1193 if (ibits[msk] == NULL)
1194 continue;
1195 if ((ibits[msk][idx] & bit) == 0)
1196 continue;
1197 /*
1198 * XXX Check for a duplicate set. This can occur because a
1199 * socket calls selrecord() twice for each poll() call
1200 * resulting in two selfds per real fd. selrescan() will
1201 * call selsetbits twice as a result.
1202 */
1203 if ((obits[msk][idx] & bit) != 0)
1204 continue;
1205 obits[msk][idx] |= bit;
1206 n++;
1207 }
1208
1209 return (n);
1210 }
1211
1212 static __inline int
1213 getselfd_cap(struct filedesc *fdp, int fd, struct file **fpp)
1214 {
1215 cap_rights_t rights;
1216
1217 cap_rights_init(&rights, CAP_EVENT);
1218
1219 return (fget_unlocked(fdp, fd, &rights, 0, fpp, NULL));
1220 }
1221
1222 /*
1223 * Traverse the list of fds attached to this thread's seltd and check for
1224 * completion.
1225 */
1226 static int
1227 selrescan(struct thread *td, fd_mask **ibits, fd_mask **obits)
1228 {
1229 struct filedesc *fdp;
1230 struct selinfo *si;
1231 struct seltd *stp;
1232 struct selfd *sfp;
1233 struct selfd *sfn;
1234 struct file *fp;
1235 fd_mask bit;
1236 int fd, ev, n, idx;
1237 int error;
1238
1239 fdp = td->td_proc->p_fd;
1240 stp = td->td_sel;
1241 n = 0;
1242 STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
1243 fd = (int)(uintptr_t)sfp->sf_cookie;
1244 si = sfp->sf_si;
1245 selfdfree(stp, sfp);
1246 /* If the selinfo wasn't cleared the event didn't fire. */
1247 if (si != NULL)
1248 continue;
1249 error = getselfd_cap(fdp, fd, &fp);
1250 if (error)
1251 return (error);
1252 idx = fd / NFDBITS;
1253 bit = (fd_mask)1 << (fd % NFDBITS);
1254 ev = fo_poll(fp, selflags(ibits, idx, bit), td->td_ucred, td);
1255 fdrop(fp, td);
1256 if (ev != 0)
1257 n += selsetbits(ibits, obits, idx, bit, ev);
1258 }
1259 stp->st_flags = 0;
1260 td->td_retval[0] = n;
1261 return (0);
1262 }
1263
1264 /*
1265 * Perform the initial filedescriptor scan and register ourselves with
1266 * each selinfo.
1267 */
1268 static int
1269 selscan(td, ibits, obits, nfd)
1270 struct thread *td;
1271 fd_mask **ibits, **obits;
1272 int nfd;
1273 {
1274 struct filedesc *fdp;
1275 struct file *fp;
1276 fd_mask bit;
1277 int ev, flags, end, fd;
1278 int n, idx;
1279 int error;
1280
1281 fdp = td->td_proc->p_fd;
1282 n = 0;
1283 for (idx = 0, fd = 0; fd < nfd; idx++) {
1284 end = imin(fd + NFDBITS, nfd);
1285 for (bit = 1; fd < end; bit <<= 1, fd++) {
1286 /* Compute the list of events we're interested in. */
1287 flags = selflags(ibits, idx, bit);
1288 if (flags == 0)
1289 continue;
1290 error = getselfd_cap(fdp, fd, &fp);
1291 if (error)
1292 return (error);
1293 selfdalloc(td, (void *)(uintptr_t)fd);
1294 ev = fo_poll(fp, flags, td->td_ucred, td);
1295 fdrop(fp, td);
1296 if (ev != 0)
1297 n += selsetbits(ibits, obits, idx, bit, ev);
1298 }
1299 }
1300
1301 td->td_retval[0] = n;
1302 return (0);
1303 }
1304
1305 int
1306 sys_poll(struct thread *td, struct poll_args *uap)
1307 {
1308 struct timespec ts, *tsp;
1309
1310 if (uap->timeout != INFTIM) {
1311 if (uap->timeout < 0)
1312 return (EINVAL);
1313 ts.tv_sec = uap->timeout / 1000;
1314 ts.tv_nsec = (uap->timeout % 1000) * 1000000;
1315 tsp = &ts;
1316 } else
1317 tsp = NULL;
1318
1319 return (kern_poll(td, uap->fds, uap->nfds, tsp, NULL));
1320 }
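
/*
 * Minimal userspace sketch of poll(2) as implemented above
 * (illustrative only, hypothetical descriptor fd): wait at most 500 ms
 * for the descriptor to become readable.
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	int n = poll(&pfd, 1, 500);
 *
 *	if (n > 0 && (pfd.revents & POLLIN))
 *		;		// readable
 *	else if (n == 0)
 *		;		// timed out
 */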
1321
1322 int
1323 kern_poll(struct thread *td, struct pollfd *fds, u_int nfds,
1324 struct timespec *tsp, sigset_t *uset)
1325 {
1326 struct pollfd *bits;
1327 struct pollfd smallbits[32];
1328 sbintime_t sbt, precision, tmp;
1329 time_t over;
1330 struct timespec ts;
1331 int error;
1332 size_t ni;
1333
1334 precision = 0;
1335 if (tsp != NULL) {
1336 if (tsp->tv_sec < 0)
1337 return (EINVAL);
1338 if (tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000)
1339 return (EINVAL);
1340 if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
1341 sbt = 0;
1342 else {
1343 ts = *tsp;
1344 if (ts.tv_sec > INT32_MAX / 2) {
1345 over = ts.tv_sec - INT32_MAX / 2;
1346 ts.tv_sec -= over;
1347 } else
1348 over = 0;
1349 tmp = tstosbt(ts);
1350 precision = tmp;
1351 precision >>= tc_precexp;
1352 if (TIMESEL(&sbt, tmp))
1353 sbt += tc_tick_sbt;
1354 sbt += tmp;
1355 }
1356 } else
1357 sbt = -1;
1358
1359 if (nfds > maxfilesperproc && nfds > FD_SETSIZE)
1360 return (EINVAL);
1361 ni = nfds * sizeof(struct pollfd);
1362 if (ni > sizeof(smallbits))
1363 bits = malloc(ni, M_TEMP, M_WAITOK);
1364 else
1365 bits = smallbits;
1366 error = copyin(fds, bits, ni);
1367 if (error)
1368 goto done;
1369
1370 if (uset != NULL) {
1371 error = kern_sigprocmask(td, SIG_SETMASK, uset,
1372 &td->td_oldsigmask, 0);
1373 if (error)
1374 goto done;
1375 td->td_pflags |= TDP_OLDMASK;
1376 /*
1377 * Make sure that ast() is called on return to
1378 * usermode and TDP_OLDMASK is cleared, restoring old
1379 * sigmask.
1380 */
1381 thread_lock(td);
1382 td->td_flags |= TDF_ASTPENDING;
1383 thread_unlock(td);
1384 }
1385
1386 seltdinit(td);
1387 /* Iterate until the timeout expires or descriptors become ready. */
1388 for (;;) {
1389 error = pollscan(td, bits, nfds);
1390 if (error || td->td_retval[0] != 0)
1391 break;
1392 error = seltdwait(td, sbt, precision);
1393 if (error)
1394 break;
1395 error = pollrescan(td);
1396 if (error || td->td_retval[0] != 0)
1397 break;
1398 }
1399 seltdclear(td);
1400
1401 done:
1402 /* poll is not restarted after signals... */
1403 if (error == ERESTART)
1404 error = EINTR;
1405 if (error == EWOULDBLOCK)
1406 error = 0;
1407 if (error == 0) {
1408 error = pollout(td, bits, fds, nfds);
1409 if (error)
1410 goto out;
1411 }
1412 out:
1413 if (ni > sizeof(smallbits))
1414 free(bits, M_TEMP);
1415 return (error);
1416 }
1417
1418 int
1419 sys_ppoll(struct thread *td, struct ppoll_args *uap)
1420 {
1421 struct timespec ts, *tsp;
1422 sigset_t set, *ssp;
1423 int error;
1424
1425 if (uap->ts != NULL) {
1426 error = copyin(uap->ts, &ts, sizeof(ts));
1427 if (error)
1428 return (error);
1429 tsp = &ts;
1430 } else
1431 tsp = NULL;
1432 if (uap->set != NULL) {
1433 error = copyin(uap->set, &set, sizeof(set));
1434 if (error)
1435 return (error);
1436 ssp = &set;
1437 } else
1438 ssp = NULL;
1439 /*
1440 * fds is still a pointer into user space. kern_poll() will
1441 * take care of copying that array into kernel space.
1442 */
1443
1444 return (kern_poll(td, uap->fds, uap->nfds, tsp, ssp));
1445 }
1446
1447 static int
1448 pollrescan(struct thread *td)
1449 {
1450 struct seltd *stp;
1451 struct selfd *sfp;
1452 struct selfd *sfn;
1453 struct selinfo *si;
1454 struct filedesc *fdp;
1455 struct file *fp;
1456 struct pollfd *fd;
1457 #ifdef CAPABILITIES
1458 cap_rights_t rights;
1459 #endif
1460 int n;
1461
1462 n = 0;
1463 fdp = td->td_proc->p_fd;
1464 stp = td->td_sel;
1465 FILEDESC_SLOCK(fdp);
1466 STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
1467 fd = (struct pollfd *)sfp->sf_cookie;
1468 si = sfp->sf_si;
1469 selfdfree(stp, sfp);
1470 /* If the selinfo wasn't cleared the event didn't fire. */
1471 if (si != NULL)
1472 continue;
1473 fp = fdp->fd_ofiles[fd->fd].fde_file;
1474 #ifdef CAPABILITIES
1475 if (fp == NULL ||
1476 cap_check(cap_rights(fdp, fd->fd),
1477 cap_rights_init(&rights, CAP_EVENT)) != 0)
1478 #else
1479 if (fp == NULL)
1480 #endif
1481 {
1482 fd->revents = POLLNVAL;
1483 n++;
1484 continue;
1485 }
1486
1487 /*
1488 * Note: backend also returns POLLHUP and
1489 * POLLERR if appropriate.
1490 */
1491 fd->revents = fo_poll(fp, fd->events, td->td_ucred, td);
1492 if (fd->revents != 0)
1493 n++;
1494 }
1495 FILEDESC_SUNLOCK(fdp);
1496 stp->st_flags = 0;
1497 td->td_retval[0] = n;
1498 return (0);
1499 }
1500
1501
1502 static int
1503 pollout(td, fds, ufds, nfd)
1504 struct thread *td;
1505 struct pollfd *fds;
1506 struct pollfd *ufds;
1507 u_int nfd;
1508 {
1509 int error = 0;
1510 u_int i = 0;
1511 u_int n = 0;
1512
1513 for (i = 0; i < nfd; i++) {
1514 error = copyout(&fds->revents, &ufds->revents,
1515 sizeof(ufds->revents));
1516 if (error)
1517 return (error);
1518 if (fds->revents != 0)
1519 n++;
1520 fds++;
1521 ufds++;
1522 }
1523 td->td_retval[0] = n;
1524 return (0);
1525 }
1526
1527 static int
1528 pollscan(td, fds, nfd)
1529 struct thread *td;
1530 struct pollfd *fds;
1531 u_int nfd;
1532 {
1533 struct filedesc *fdp = td->td_proc->p_fd;
1534 struct file *fp;
1535 #ifdef CAPABILITIES
1536 cap_rights_t rights;
1537 #endif
1538 int i, n = 0;
1539
1540 FILEDESC_SLOCK(fdp);
1541 for (i = 0; i < nfd; i++, fds++) {
1542 if (fds->fd > fdp->fd_lastfile) {
1543 fds->revents = POLLNVAL;
1544 n++;
1545 } else if (fds->fd < 0) {
1546 fds->revents = 0;
1547 } else {
1548 fp = fdp->fd_ofiles[fds->fd].fde_file;
1549 #ifdef CAPABILITIES
1550 if (fp == NULL ||
1551 cap_check(cap_rights(fdp, fds->fd),
1552 cap_rights_init(&rights, CAP_EVENT)) != 0)
1553 #else
1554 if (fp == NULL)
1555 #endif
1556 {
1557 fds->revents = POLLNVAL;
1558 n++;
1559 } else {
1560 /*
1561 * Note: backend also returns POLLHUP and
1562 * POLLERR if appropriate.
1563 */
1564 selfdalloc(td, fds);
1565 fds->revents = fo_poll(fp, fds->events,
1566 td->td_ucred, td);
1567 /*
1568 * POSIX requires that POLLOUT never be
1569 * set simultaneously with POLLHUP.
1570 */
1571 if ((fds->revents & POLLHUP) != 0)
1572 fds->revents &= ~POLLOUT;
1573
1574 if (fds->revents != 0)
1575 n++;
1576 }
1577 }
1578 }
1579 FILEDESC_SUNLOCK(fdp);
1580 td->td_retval[0] = n;
1581 return (0);
1582 }
1583
1584 /*
1585 * OpenBSD poll system call.
1586 *
1587 * XXX this isn't quite a true representation; OpenBSD uses select ops.
1588 */
1589 #ifndef _SYS_SYSPROTO_H_
1590 struct openbsd_poll_args {
1591 struct pollfd *fds;
1592 u_int nfds;
1593 int timeout;
1594 };
1595 #endif
1596 int
1597 sys_openbsd_poll(td, uap)
1598 register struct thread *td;
1599 register struct openbsd_poll_args *uap;
1600 {
1601 return (sys_poll(td, (struct poll_args *)uap));
1602 }
1603
1604 /*
1605 * XXX This was created specifically to support netncp and netsmb. This
1606 * allows the caller to specify a socket to wait for events on. It returns
1607 * 0 if any events matched and an error otherwise. There is no way to
1608 * determine which events fired.
1609 */
1610 int
1611 selsocket(struct socket *so, int events, struct timeval *tvp, struct thread *td)
1612 {
1613 struct timeval rtv;
1614 sbintime_t asbt, precision, rsbt;
1615 int error;
1616
1617 precision = 0; /* stupid gcc! */
1618 if (tvp != NULL) {
1619 rtv = *tvp;
1620 if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
1621 rtv.tv_usec >= 1000000)
1622 return (EINVAL);
1623 if (!timevalisset(&rtv))
1624 asbt = 0;
1625 else if (rtv.tv_sec <= INT32_MAX) {
1626 rsbt = tvtosbt(rtv);
1627 precision = rsbt;
1628 precision >>= tc_precexp;
1629 if (TIMESEL(&asbt, rsbt))
1630 asbt += tc_tick_sbt;
1631 if (asbt <= SBT_MAX - rsbt)
1632 asbt += rsbt;
1633 else
1634 asbt = -1;
1635 } else
1636 asbt = -1;
1637 } else
1638 asbt = -1;
1639 seltdinit(td);
1640 /*
1641 * Iterate until the timeout expires or the socket becomes ready.
1642 */
1643 for (;;) {
1644 selfdalloc(td, NULL);
1645 error = sopoll(so, events, NULL, td);
1646 /* error here is actually the ready events. */
1647 if (error)
1648 return (0);
1649 error = seltdwait(td, asbt, precision);
1650 if (error)
1651 break;
1652 }
1653 seltdclear(td);
1654 /* XXX Duplicates ncp/smb behavior. */
1655 if (error == ERESTART)
1656 error = 0;
1657 return (error);
1658 }
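
/*
 * Hypothetical in-kernel caller in the spirit of netncp/netsmb
 * (illustrative only): wait at most one second for the socket to
 * become readable.
 *
 *	struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
 *
 *	error = selsocket(so, POLLIN, &tv, td);
 *	if (error == 0)
 *		;		// a requested event is pending
 *	else if (error == EWOULDBLOCK)
 *		;		// timed out
 */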
1659
1660 /*
1661 * Preallocate two selfds associated with 'cookie'. Some fo_poll routines
1662 * have two select sets, one for read and another for write.
1663 */
1664 static void
1665 selfdalloc(struct thread *td, void *cookie)
1666 {
1667 struct seltd *stp;
1668
1669 stp = td->td_sel;
1670 if (stp->st_free1 == NULL)
1671 stp->st_free1 = uma_zalloc(selfd_zone, M_WAITOK|M_ZERO);
1672 stp->st_free1->sf_td = stp;
1673 stp->st_free1->sf_cookie = cookie;
1674 if (stp->st_free2 == NULL)
1675 stp->st_free2 = uma_zalloc(selfd_zone, M_WAITOK|M_ZERO);
1676 stp->st_free2->sf_td = stp;
1677 stp->st_free2->sf_cookie = cookie;
1678 }
1679
1680 static void
1681 selfdfree(struct seltd *stp, struct selfd *sfp)
1682 {
1683 STAILQ_REMOVE(&stp->st_selq, sfp, selfd, sf_link);
1684 mtx_lock(sfp->sf_mtx);
1685 if (sfp->sf_si)
1686 TAILQ_REMOVE(&sfp->sf_si->si_tdlist, sfp, sf_threads);
1687 mtx_unlock(sfp->sf_mtx);
1688 uma_zfree(selfd_zone, sfp);
1689 }
1690
1691 /* Drain the waiters tied to all the selfd belonging the specified selinfo. */
1692 void
1693 seldrain(sip)
1694 struct selinfo *sip;
1695 {
1696
1697 /*
1698 * This functionality is already provided by doselwakeup(), so it
1699 * is enough to call it here.
1700 * If necessary, the caller should take care to avoid races between
1701 * a thread calling select()/poll() and the file descriptor being
1702 * detached, but, again, those races are just the same as for
1703 * selwakeup().
1704 */
1705 doselwakeup(sip, -1);
1706 }
1707
1708 /*
1709 * Record a select request.
1710 */
1711 void
1712 selrecord(selector, sip)
1713 struct thread *selector;
1714 struct selinfo *sip;
1715 {
1716 struct selfd *sfp;
1717 struct seltd *stp;
1718 struct mtx *mtxp;
1719
1720 stp = selector->td_sel;
1721 /*
1722 * Don't record when doing a rescan.
1723 */
1724 if (stp->st_flags & SELTD_RESCAN)
1725 return;
1726 /*
1727 * Grab one of the preallocated descriptors.
1728 */
1729 sfp = NULL;
1730 if ((sfp = stp->st_free1) != NULL)
1731 stp->st_free1 = NULL;
1732 else if ((sfp = stp->st_free2) != NULL)
1733 stp->st_free2 = NULL;
1734 else
1735 panic("selrecord: No free selfd on selq");
1736 mtxp = sip->si_mtx;
1737 if (mtxp == NULL)
1738 mtxp = mtx_pool_find(mtxpool_select, sip);
1739 /*
1740 * Initialize the sfp and queue it in the thread.
1741 */
1742 sfp->sf_si = sip;
1743 sfp->sf_mtx = mtxp;
1744 STAILQ_INSERT_TAIL(&stp->st_selq, sfp, sf_link);
1745 /*
1746 * Now that we've locked the sip, check for initialization.
1747 */
1748 mtx_lock(mtxp);
1749 if (sip->si_mtx == NULL) {
1750 sip->si_mtx = mtxp;
1751 TAILQ_INIT(&sip->si_tdlist);
1752 }
1753 /*
1754 * Add this thread to the list of selfds listening on this selinfo.
1755 */
1756 TAILQ_INSERT_TAIL(&sip->si_tdlist, sfp, sf_threads);
1757 mtx_unlock(sip->si_mtx);
1758 }
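
/*
 * Sketch of how a driver's d_poll method typically pairs selrecord()
 * with selwakeup() (hypothetical softc and fields, illustrative only):
 *
 *	static int
 *	foo_poll(struct cdev *dev, int events, struct thread *td)
 *	{
 *		struct foo_softc *sc = dev->si_drv1;
 *		int revents = 0;
 *
 *		mtx_lock(&sc->sc_mtx);
 *		if (sc->sc_ready)
 *			revents = events & (POLLIN | POLLRDNORM);
 *		else
 *			selrecord(td, &sc->sc_rsel);
 *		mtx_unlock(&sc->sc_mtx);
 *		return (revents);
 *	}
 *
 * When data later arrives, the driver sets sc_ready and calls
 * selwakeuppri(&sc->sc_rsel, PZERO) to wake any recorded waiters.
 */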
1759
1760 /* Wake up a selecting thread. */
1761 void
1762 selwakeup(sip)
1763 struct selinfo *sip;
1764 {
1765 doselwakeup(sip, -1);
1766 }
1767
1768 /* Wake up a selecting thread, and set its priority. */
1769 void
1770 selwakeuppri(sip, pri)
1771 struct selinfo *sip;
1772 int pri;
1773 {
1774 doselwakeup(sip, pri);
1775 }
1776
1777 /*
1778 * Do a wakeup when a selectable event occurs.
1779 */
1780 static void
1781 doselwakeup(sip, pri)
1782 struct selinfo *sip;
1783 int pri;
1784 {
1785 struct selfd *sfp;
1786 struct selfd *sfn;
1787 struct seltd *stp;
1788
1789 /* If it's not initialized there can't be any waiters. */
1790 if (sip->si_mtx == NULL)
1791 return;
1792 /*
1793 * Locking the selinfo locks all selfds associated with it.
1794 */
1795 mtx_lock(sip->si_mtx);
1796 TAILQ_FOREACH_SAFE(sfp, &sip->si_tdlist, sf_threads, sfn) {
1797 /*
1798 * Once we remove this sfp from the list and clear the
1799 * sf_si, seltdclear will know to ignore this si.
1800 */
1801 TAILQ_REMOVE(&sip->si_tdlist, sfp, sf_threads);
1802 sfp->sf_si = NULL;
1803 stp = sfp->sf_td;
1804 mtx_lock(&stp->st_mtx);
1805 stp->st_flags |= SELTD_PENDING;
1806 cv_broadcastpri(&stp->st_wait, pri);
1807 mtx_unlock(&stp->st_mtx);
1808 }
1809 mtx_unlock(sip->si_mtx);
1810 }
1811
1812 static void
1813 seltdinit(struct thread *td)
1814 {
1815 struct seltd *stp;
1816
1817 if ((stp = td->td_sel) != NULL)
1818 goto out;
1819 td->td_sel = stp = malloc(sizeof(*stp), M_SELECT, M_WAITOK|M_ZERO);
1820 mtx_init(&stp->st_mtx, "sellck", NULL, MTX_DEF);
1821 cv_init(&stp->st_wait, "select");
1822 out:
1823 stp->st_flags = 0;
1824 STAILQ_INIT(&stp->st_selq);
1825 }
1826
1827 static int
1828 seltdwait(struct thread *td, sbintime_t sbt, sbintime_t precision)
1829 {
1830 struct seltd *stp;
1831 int error;
1832
1833 stp = td->td_sel;
1834 /*
1835 * An event of interest may occur while we do not hold the seltd
1836 * locked, so check the pending flag before we sleep.
1837 */
1838 mtx_lock(&stp->st_mtx);
1839 /*
1840 * Any further calls to selrecord will be a rescan.
1841 */
1842 stp->st_flags |= SELTD_RESCAN;
1843 if (stp->st_flags & SELTD_PENDING) {
1844 mtx_unlock(&stp->st_mtx);
1845 return (0);
1846 }
1847 if (sbt == 0)
1848 error = EWOULDBLOCK;
1849 else if (sbt != -1)
1850 error = cv_timedwait_sig_sbt(&stp->st_wait, &stp->st_mtx,
1851 sbt, precision, C_ABSOLUTE);
1852 else
1853 error = cv_wait_sig(&stp->st_wait, &stp->st_mtx);
1854 mtx_unlock(&stp->st_mtx);
1855
1856 return (error);
1857 }
1858
1859 void
1860 seltdfini(struct thread *td)
1861 {
1862 struct seltd *stp;
1863
1864 stp = td->td_sel;
1865 if (stp == NULL)
1866 return;
1867 if (stp->st_free1)
1868 uma_zfree(selfd_zone, stp->st_free1);
1869 if (stp->st_free2)
1870 uma_zfree(selfd_zone, stp->st_free2);
1871 td->td_sel = NULL;
1872 free(stp, M_SELECT);
1873 }
1874
1875 /*
1876 * Remove the references to the thread from all of the objects we were
1877 * polling.
1878 */
1879 static void
1880 seltdclear(struct thread *td)
1881 {
1882 struct seltd *stp;
1883 struct selfd *sfp;
1884 struct selfd *sfn;
1885
1886 stp = td->td_sel;
1887 STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn)
1888 selfdfree(stp, sfp);
1889 stp->st_flags = 0;
1890 }
1891
1892 static void selectinit(void *);
1893 SYSINIT(select, SI_SUB_SYSCALLS, SI_ORDER_ANY, selectinit, NULL);
1894 static void
1895 selectinit(void *dummy __unused)
1896 {
1897
1898 selfd_zone = uma_zcreate("selfd", sizeof(struct selfd), NULL, NULL,
1899 NULL, NULL, UMA_ALIGN_PTR, 0);
1900 mtxpool_select = mtx_pool_create("select mtxpool", 128, MTX_DEF);
1901 }