/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/capability.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/socketvar.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/resourcevar.h>
#include <sys/selinfo.h>
#include <sys/sleepqueue.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <security/audit/audit.h>

/*
 * The following macro defines how many bytes will be allocated on the
 * stack instead of with malloc(9) when passing ioctl data structures
 * between userspace and the kernel.  Some ioctls with small data
 * structures are used very frequently, and this small on-stack buffer
 * gives a significant speedup for those requests.  The value of this
 * define should be greater than or equal to 64 bytes and should also
 * be a power of two.  The data structure is currently hard-aligned to
 * an 8-byte boundary on the stack.  This should currently be
 * sufficient for all supported platforms.
 */
#define	SYS_IOCTL_SMALL_SIZE	128	/* bytes */
#define	SYS_IOCTL_SMALL_ALIGN	8	/* bytes */
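
/*
 * For example, FIONREAD is _IOR('f', 127, int), so IOCPARM_LEN() is
 * sizeof(int) and the request is served from the on-stack buffer;
 * only requests carrying more than SYS_IOCTL_SMALL_SIZE bytes of
 * argument data fall back to malloc(9).
 */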

int iosize_max_clamp = 1;
SYSCTL_INT(_debug, OID_AUTO, iosize_max_clamp, CTLFLAG_RW,
    &iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX");
int devfs_iosize_max_clamp = 1;
SYSCTL_INT(_debug, OID_AUTO, devfs_iosize_max_clamp, CTLFLAG_RW,
    &devfs_iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX for devices");

/*
 * Assert that the return value of read(2) and write(2) syscalls fits
 * into a register.  If not, an architecture will need to provide the
 * usermode wrappers to reconstruct the result.
 */
CTASSERT(sizeof(register_t) >= sizeof(size_t));

static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
MALLOC_DEFINE(M_IOV, "iov", "large iov's");

static int	pollout(struct thread *, struct pollfd *, struct pollfd *,
		    u_int);
static int	pollscan(struct thread *, struct pollfd *, u_int);
static int	pollrescan(struct thread *);
static int	selscan(struct thread *, fd_mask **, fd_mask **, int);
static int	selrescan(struct thread *, fd_mask **, fd_mask **);
static void	selfdalloc(struct thread *, void *);
static void	selfdfree(struct seltd *, struct selfd *);
static int	dofileread(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static int	dofilewrite(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static void	doselwakeup(struct selinfo *, int);
static void	seltdinit(struct thread *);
static int	seltdwait(struct thread *, int);
static void	seltdclear(struct thread *);

/*
 * One seltd per-thread allocated on demand as needed.
 *
 *	t - protected by st_mtx
 *	k - Only accessed by curthread or read-only
 */
struct seltd {
	STAILQ_HEAD(, selfd)	st_selq;	/* (k) List of selfds. */
	struct selfd		*st_free1;	/* (k) free fd for read set. */
	struct selfd		*st_free2;	/* (k) free fd for write set. */
	struct mtx		st_mtx;		/* Protects struct seltd */
	struct cv		st_wait;	/* (t) Wait channel. */
	int			st_flags;	/* (t) SELTD_ flags. */
};

#define	SELTD_PENDING	0x0001			/* We have pending events. */
#define	SELTD_RESCAN	0x0002			/* Doing a rescan. */

/*
 * One selfd allocated per-thread per-file-descriptor.
 *	f - protected by sf_mtx
 */
struct selfd {
	STAILQ_ENTRY(selfd)	sf_link;	/* (k) fds owned by this td. */
	TAILQ_ENTRY(selfd)	sf_threads;	/* (f) fds on this selinfo. */
	struct selinfo		*sf_si;		/* (f) selinfo when linked. */
	struct mtx		*sf_mtx;	/* Pointer to selinfo mtx. */
	struct seltd		*sf_td;		/* (k) owning seltd. */
	void			*sf_cookie;	/* (k) fd or pollfd. */
};
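
/*
 * Each selfd links a polling thread (sf_td, sf_link) to one selinfo
 * (sf_si, sf_threads).  doselwakeup() detaches the selfd from the
 * selinfo and clears sf_si, which is how selrescan()/pollrescan()
 * detect that the event fired.
 */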

static uma_zone_t selfd_zone;
static struct mtx_pool *mtxpool_select;

#ifndef _SYS_SYSPROTO_H_
struct read_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
};
#endif
int
sys_read(struct thread *td, struct read_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_readv(td, uap->fd, &auio);
	return (error);
}

/*
 * Positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pread_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
	int	pad;
	off_t	offset;
};
#endif
int
sys_pread(struct thread *td, struct pread_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_preadv(td, uap->fd, &auio, uap->offset);
	return (error);
}

int
freebsd6_pread(struct thread *td, struct freebsd6_pread_args *uap)
{
	struct pread_args oargs;

	oargs.fd = uap->fd;
	oargs.buf = uap->buf;
	oargs.nbyte = uap->nbyte;
	oargs.offset = uap->offset;
	return (sys_pread(td, &oargs));
}

/*
 * Scatter read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct readv_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_readv(struct thread *td, struct readv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_readv(td, uap->fd, auio);
	free(auio, M_IOV);
	return (error);
}

int
kern_readv(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	int error;

	error = fget_read(td, fd, CAP_READ | CAP_SEEK, &fp);
	if (error)
		return (error);
	error = dofileread(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}

/*
 * Scatter positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct preadv_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_preadv(struct thread *td, struct preadv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_preadv(td, uap->fd, auio, uap->offset);
	free(auio, M_IOV);
	return (error);
}

int
kern_preadv(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	int error;

	error = fget_read(td, fd, CAP_READ, &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 && fp->f_vnode->v_type != VCHR)
		error = EINVAL;
	else
		error = dofileread(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}

/*
 * Common code for readv and preadv that reads data in
 * from a file using the passed in uio, offset, and flags.
 */
static int
dofileread(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	/* Finish zero-length reads right here. */
	if (auio->uio_resid == 0) {
		td->td_retval[0] = 0;
		return (0);
	}
	auio->uio_rw = UIO_READ;
	auio->uio_offset = offset;
	auio->uio_td = td;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
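	/*
	 * If the read is interrupted after transferring some data,
	 * suppress the error so the partial transfer is reported to
	 * the caller instead.
	 */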
	if ((error = fo_read(fp, auio, td->td_ucred, flags, td))) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_READ, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct write_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
};
#endif
int
sys_write(struct thread *td, struct write_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_writev(td, uap->fd, &auio);
	return (error);
}

/*
 * Positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwrite_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
	int	pad;
	off_t	offset;
};
#endif
int
sys_pwrite(struct thread *td, struct pwrite_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_pwritev(td, uap->fd, &auio, uap->offset);
	return (error);
}

int
freebsd6_pwrite(struct thread *td, struct freebsd6_pwrite_args *uap)
{
	struct pwrite_args oargs;

	oargs.fd = uap->fd;
	oargs.buf = uap->buf;
	oargs.nbyte = uap->nbyte;
	oargs.offset = uap->offset;
	return (sys_pwrite(td, &oargs));
}

/*
 * Gather write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct writev_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_writev(struct thread *td, struct writev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_writev(td, uap->fd, auio);
	free(auio, M_IOV);
	return (error);
}

int
kern_writev(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	int error;

	error = fget_write(td, fd, CAP_WRITE | CAP_SEEK, &fp);
	if (error)
		return (error);
	error = dofilewrite(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}

/*
 * Gather positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwritev_args {
	int	fd;
	struct	iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_pwritev(struct thread *td, struct pwritev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_pwritev(td, uap->fd, auio, uap->offset);
	free(auio, M_IOV);
	return (error);
}

int
kern_pwritev(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	int error;

	error = fget_write(td, fd, CAP_WRITE, &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 && fp->f_vnode->v_type != VCHR)
		error = EINVAL;
	else
		error = dofilewrite(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}

/*
 * Common code for writev and pwritev that writes data to
 * a file using the passed in uio, offset, and flags.
 */
static int
dofilewrite(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	auio->uio_rw = UIO_WRITE;
	auio->uio_td = td;
	auio->uio_offset = offset;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
	if (fp->f_type == DTYPE_VNODE &&
	    (fp->f_vnread_flags & FDEVFS_VNODE) == 0)
		bwillwrite();
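	/*
	 * As in dofileread(), an interrupted write that made progress
	 * reports the partial transfer rather than an error.
	 */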
	if ((error = fo_write(fp, auio, td->td_ucred, flags, td))) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		/* Socket layer is responsible for issuing SIGPIPE. */
		if (fp->f_type != DTYPE_SOCKET && error == EPIPE) {
			PROC_LOCK(td->td_proc);
			tdsignal(td, SIGPIPE);
			PROC_UNLOCK(td->td_proc);
		}
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_WRITE, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}

/*
 * Truncate a file given a file descriptor.
 *
 * Can't use fget_write() here, since we must return EINVAL and not
 * EBADF if the descriptor isn't writable.
 */
int
kern_ftruncate(struct thread *td, int fd, off_t length)
{
	struct file *fp;
	int error;

	AUDIT_ARG_FD(fd);
	if (length < 0)
		return (EINVAL);
	error = fget(td, fd, CAP_FTRUNCATE, &fp);
	if (error)
		return (error);
	AUDIT_ARG_FILE(td->td_proc, fp);
	if (!(fp->f_flag & FWRITE)) {
		fdrop(fp, td);
		return (EINVAL);
	}
	error = fo_truncate(fp, length, td->td_ucred, td);
	fdrop(fp, td);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ftruncate_args {
	int	fd;
	int	pad;
	off_t	length;
};
#endif
int
sys_ftruncate(struct thread *td, struct ftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct oftruncate_args {
	int	fd;
	long	length;
};
#endif
int
oftruncate(struct thread *td, struct oftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct ioctl_args {
	int	fd;
	u_long	com;
	caddr_t	data;
};
#endif
/* ARGSUSED */
int
sys_ioctl(struct thread *td, struct ioctl_args *uap)
{
	u_char smalldata[SYS_IOCTL_SMALL_SIZE] __aligned(SYS_IOCTL_SMALL_ALIGN);
	u_long com;
	int arg, error;
	u_int size;
	caddr_t data;

	if (uap->com > 0xffffffff) {
		printf(
		    "WARNING pid %d (%s): ioctl sign-extension ioctl %lx\n",
		    td->td_proc->p_pid, td->td_name, uap->com);
		uap->com &= 0xffffffff;
	}
	com = uap->com;

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if ((size > IOCPARM_MAX) ||
	    ((com & (IOC_VOID | IOC_IN | IOC_OUT)) == 0) ||
#if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	    ((com & IOC_OUT) && size == 0) ||
#else
	    ((com & (IOC_IN | IOC_OUT)) && size == 0) ||
#endif
	    ((com & IOC_VOID) && size > 0 && size != sizeof(int)))
		return (ENOTTY);

	if (size > 0) {
		if (com & IOC_VOID) {
			/* Integer argument. */
			arg = (intptr_t)uap->data;
			data = (void *)&arg;
			size = 0;
		} else {
			if (size > SYS_IOCTL_SMALL_SIZE)
				data = malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
			else
				data = smalldata;
		}
	} else
		data = (void *)&uap->data;
	if (com & IOC_IN) {
		error = copyin(uap->data, data, (u_int)size);
		if (error != 0)
			goto out;
	} else if (com & IOC_OUT) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(data, size);
	}

	error = kern_ioctl(td, uap->fd, com, data);

	if (error == 0 && (com & IOC_OUT))
		error = copyout(data, uap->data, (u_int)size);

out:
	if (size > SYS_IOCTL_SMALL_SIZE)
		free(data, M_IOCTLOPS);
	return (error);
}

int
kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
{
	struct file *fp;
	struct filedesc *fdp;
	int error;
	int tmp;

	AUDIT_ARG_FD(fd);
	AUDIT_ARG_CMD(com);
	if ((error = fget(td, fd, CAP_IOCTL, &fp)) != 0)
		return (error);
	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		fdrop(fp, td);
		return (EBADF);
	}
	fdp = td->td_proc->p_fd;
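	/*
	 * A few generic requests are handled here: FIONCLEX/FIOCLEX
	 * manipulate the close-on-exec flag directly in the descriptor
	 * table, while FIONBIO/FIOASYNC update f_flag and then fall
	 * through so the file's own ioctl method sees them as well.
	 */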
	switch (com) {
	case FIONCLEX:
		FILEDESC_XLOCK(fdp);
		fdp->fd_ofileflags[fd] &= ~UF_EXCLOSE;
		FILEDESC_XUNLOCK(fdp);
		goto out;
	case FIOCLEX:
		FILEDESC_XLOCK(fdp);
		fdp->fd_ofileflags[fd] |= UF_EXCLOSE;
		FILEDESC_XUNLOCK(fdp);
		goto out;
	case FIONBIO:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FNONBLOCK);
		else
			atomic_clear_int(&fp->f_flag, FNONBLOCK);
		data = (void *)&tmp;
		break;
	case FIOASYNC:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FASYNC);
		else
			atomic_clear_int(&fp->f_flag, FASYNC);
		data = (void *)&tmp;
		break;
	}

	error = fo_ioctl(fp, com, data, td->td_ucred, td);
out:
	fdrop(fp, td);
	return (error);
}

int
poll_no_poll(int events)
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

int
sys_pselect(struct thread *td, struct pselect_args *uap)
{
	struct timespec ts;
	struct timeval tv, *tvp;
	sigset_t set, *uset;
	int error;

	if (uap->ts != NULL) {
		error = copyin(uap->ts, &ts, sizeof(ts));
		if (error != 0)
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		tvp = &tv;
	} else
		tvp = NULL;
	if (uap->sm != NULL) {
		error = copyin(uap->sm, &set, sizeof(set));
		if (error != 0)
			return (error);
		uset = &set;
	} else
		uset = NULL;
	return (kern_pselect(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    uset, NFDBITS));
}

int
kern_pselect(struct thread *td, int nd, fd_set *in, fd_set *ou, fd_set *ex,
    struct timeval *tvp, sigset_t *uset, int abi_nfdbits)
{
	int error;

	if (uset != NULL) {
		error = kern_sigprocmask(td, SIG_SETMASK, uset,
		    &td->td_oldsigmask, 0);
		if (error != 0)
			return (error);
		td->td_pflags |= TDP_OLDMASK;
		/*
		 * Make sure that ast() is called on return to
		 * usermode and TDP_OLDMASK is cleared, restoring old
		 * sigmask.
		 */
		thread_lock(td);
		td->td_flags |= TDF_ASTPENDING;
		thread_unlock(td);
	}
	error = kern_select(td, nd, in, ou, ex, tvp, abi_nfdbits);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct select_args {
	int	nd;
	fd_set	*in, *ou, *ex;
	struct	timeval *tv;
};
#endif
int
sys_select(struct thread *td, struct select_args *uap)
{
	struct timeval tv, *tvp;
	int error;

	if (uap->tv != NULL) {
		error = copyin(uap->tv, &tv, sizeof(tv));
		if (error)
			return (error);
		tvp = &tv;
	} else
		tvp = NULL;

	return (kern_select(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    NFDBITS));
}

/*
 * In the unlikely case when the user specified an n greater than the
 * last open file descriptor, check that no bits are set after the last
 * valid fd.  We must return EBADF if any is set.
 *
 * There are applications that rely on this behaviour.
 *
 * nd is fd_lastfile + 1.
 */
static int
select_check_badfd(fd_set *fd_in, int nd, int ndu, int abi_nfdbits)
{
	char *addr, *oaddr;
	int b, i, res;
	uint8_t bits;

	if (nd >= ndu || fd_in == NULL)
		return (0);

	oaddr = NULL;
	bits = 0; /* silence gcc */
	for (i = nd; i < ndu; i++) {
		b = i / NBBY;
#if BYTE_ORDER == LITTLE_ENDIAN
		addr = (char *)fd_in + b;
#else
		addr = (char *)fd_in;
		if (abi_nfdbits == NFDBITS) {
			addr += rounddown(b, sizeof(fd_mask)) +
			    sizeof(fd_mask) - 1 - b % sizeof(fd_mask);
		} else {
			addr += rounddown(b, sizeof(uint32_t)) +
			    sizeof(uint32_t) - 1 - b % sizeof(uint32_t);
		}
#endif
		if (addr != oaddr) {
			res = fubyte(addr);
			if (res == -1)
				return (EFAULT);
			oaddr = addr;
			bits = res;
		}
		if ((bits & (1 << (i % NBBY))) != 0)
			return (EBADF);
	}
	return (0);
}

int
kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou,
    fd_set *fd_ex, struct timeval *tvp, int abi_nfdbits)
{
	struct filedesc *fdp;
	/*
	 * The magic 2048 here is chosen to be just enough for FD_SETSIZE
	 * infds with the new FD_SETSIZE of 1024, and more than enough for
	 * FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE
	 * of 256.
	 */
	fd_mask s_selbits[howmany(2048, NFDBITS)];
	fd_mask *ibits[3], *obits[3], *selbits, *sbp;
	struct timeval atv, rtv, ttv;
	int error, lf, ndu, timo;
	u_int nbufbytes, ncpbytes, ncpubytes, nfdbits;

	if (nd < 0)
		return (EINVAL);
	fdp = td->td_proc->p_fd;
	ndu = nd;
	lf = fdp->fd_lastfile;
	if (nd > lf + 1)
		nd = lf + 1;

	error = select_check_badfd(fd_in, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ou, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ex, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);

	/*
	 * Allocate just enough bits for the non-null fd_sets.  Use the
	 * preallocated auto buffer if possible.
	 */
	nfdbits = roundup(nd, NFDBITS);
	ncpbytes = nfdbits / NBBY;
	ncpubytes = roundup(nd, abi_nfdbits) / NBBY;
	nbufbytes = 0;
	if (fd_in != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ou != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ex != NULL)
		nbufbytes += 2 * ncpbytes;
	if (nbufbytes <= sizeof s_selbits)
		selbits = &s_selbits[0];
	else
		selbits = malloc(nbufbytes, M_SELECT, M_WAITOK);

	/*
	 * Assign pointers into the bit buffers and fetch the input bits.
	 * Put the output buffers together so that they can be bzeroed
	 * together.
	 */
	sbp = selbits;
#define	getbits(name, x) \
	do {								\
		if (name == NULL) {					\
			ibits[x] = NULL;				\
			obits[x] = NULL;				\
		} else {						\
			ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp;	\
			obits[x] = sbp;					\
			sbp += ncpbytes / sizeof *sbp;			\
			error = copyin(name, ibits[x], ncpubytes);	\
			if (error != 0)					\
				goto done;				\
			bzero((char *)ibits[x] + ncpubytes,		\
			    ncpbytes - ncpubytes);			\
		}							\
	} while (0)
	getbits(fd_in, 0);
	getbits(fd_ou, 1);
	getbits(fd_ex, 2);
#undef	getbits

#if BYTE_ORDER == BIG_ENDIAN && defined(__LP64__)
	/*
	 * XXX: swizzle_fdset assumes that if abi_nfdbits != NFDBITS,
	 * we are running under 32-bit emulation. This should be
	 * more generic.
	 */
#define	swizzle_fdset(bits)						\
	if (abi_nfdbits != NFDBITS && bits != NULL) {			\
		int i;							\
		for (i = 0; i < ncpbytes / sizeof *sbp; i++)		\
			bits[i] = (bits[i] >> 32) | (bits[i] << 32);	\
	}
#else
#define	swizzle_fdset(bits)
#endif

	/* Make sure the bit order makes it through an ABI transition */
	swizzle_fdset(ibits[0]);
	swizzle_fdset(ibits[1]);
	swizzle_fdset(ibits[2]);

	if (nbufbytes != 0)
		bzero(selbits, nbufbytes / 2);

	if (tvp != NULL) {
		atv = *tvp;
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
	}
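	/*
	 * atv now holds the absolute uptime at which the timeout
	 * expires; a zero atv means wait indefinitely.
	 */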
	timo = 0;
	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = selscan(td, ibits, obits, nd);
		if (error || td->td_retval[0] != 0)
			break;
		if (atv.tv_sec || atv.tv_usec) {
			getmicrouptime(&rtv);
			if (timevalcmp(&rtv, &atv, >=))
				break;
			ttv = atv;
			timevalsub(&ttv, &rtv);
			timo = ttv.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tvtohz(&ttv);
		}
		error = seltdwait(td, timo);
		if (error)
			break;
		error = selrescan(td, ibits, obits);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

done:
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	/* swizzle bit order back, if necessary */
	swizzle_fdset(obits[0]);
	swizzle_fdset(obits[1]);
	swizzle_fdset(obits[2]);
#undef swizzle_fdset

#define	putbits(name, x) \
	if (name && (error2 = copyout(obits[x], name, ncpubytes))) \
		error = error2;
	if (error == 0) {
		int error2;

		putbits(fd_in, 0);
		putbits(fd_ou, 1);
		putbits(fd_ex, 2);
#undef putbits
	}
	if (selbits != &s_selbits[0])
		free(selbits, M_SELECT);

	return (error);
}
/*
 * Convert a select bit set to poll flags.
 *
 * The backend always returns POLLHUP/POLLERR if appropriate and we
 * return this as a set bit in any set.
 */
static int select_flags[3] = {
    POLLRDNORM | POLLHUP | POLLERR,
    POLLWRNORM | POLLHUP | POLLERR,
    POLLRDBAND | POLLERR
};

/*
 * Compute the fo_poll flags required for a fd given by the index and
 * bit position in the fd_mask array.
 */
static __inline int
selflags(fd_mask **ibits, int idx, fd_mask bit)
{
	int flags;
	int msk;

	flags = 0;
	for (msk = 0; msk < 3; msk++) {
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		flags |= select_flags[msk];
	}
	return (flags);
}

/*
 * Set the appropriate output bits given a mask of fired events and the
 * input bits originally requested.
 */
static __inline int
selsetbits(fd_mask **ibits, fd_mask **obits, int idx, fd_mask bit, int events)
{
	int msk;
	int n;

	n = 0;
	for (msk = 0; msk < 3; msk++) {
		if ((events & select_flags[msk]) == 0)
			continue;
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		/*
		 * XXX Check for a duplicate set.  This can occur because a
		 * socket calls selrecord() twice for each poll() call
		 * resulting in two selfds per real fd.  selrescan() will
		 * call selsetbits twice as a result.
		 */
		if ((obits[msk][idx] & bit) != 0)
			continue;
		obits[msk][idx] |= bit;
		n++;
	}

	return (n);
}

static __inline int
getselfd_cap(struct filedesc *fdp, int fd, struct file **fpp)
{
	struct file *fp;
#ifdef CAPABILITIES
	struct file *fp_fromcap;
	int error;
#endif

	if ((fp = fget_unlocked(fdp, fd)) == NULL)
		return (EBADF);
#ifdef CAPABILITIES
	/*
	 * If the file descriptor is for a capability, test rights and use
	 * the file descriptor referenced by the capability.
	 */
	error = cap_funwrap(fp, CAP_POLL_EVENT, &fp_fromcap);
	if (error) {
		fdrop(fp, curthread);
		return (error);
	}
	if (fp != fp_fromcap) {
		fhold(fp_fromcap);
		fdrop(fp, curthread);
		fp = fp_fromcap;
	}
#endif /* CAPABILITIES */
	*fpp = fp;
	return (0);
}

/*
 * Traverse the list of fds attached to this thread's seltd and check for
 * completion.
 */
static int
selrescan(struct thread *td, fd_mask **ibits, fd_mask **obits)
{
	struct filedesc *fdp;
	struct selinfo *si;
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct file *fp;
	fd_mask bit;
	int fd, ev, n, idx;
	int error;

	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	n = 0;
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (int)(uintptr_t)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		error = getselfd_cap(fdp, fd, &fp);
		if (error)
			return (error);
		idx = fd / NFDBITS;
		bit = (fd_mask)1 << (fd % NFDBITS);
		ev = fo_poll(fp, selflags(ibits, idx, bit), td->td_ucred, td);
		fdrop(fp, td);
		if (ev != 0)
			n += selsetbits(ibits, obits, idx, bit, ev);
	}
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}

/*
 * Perform the initial filedescriptor scan and register ourselves with
 * each selinfo.
 */
static int
selscan(struct thread *td, fd_mask **ibits, fd_mask **obits, int nfd)
{
	struct filedesc *fdp;
	struct file *fp;
	fd_mask bit;
	int ev, flags, end, fd;
	int n, idx;
	int error;

	fdp = td->td_proc->p_fd;
	n = 0;
	for (idx = 0, fd = 0; fd < nfd; idx++) {
		end = imin(fd + NFDBITS, nfd);
		for (bit = 1; fd < end; bit <<= 1, fd++) {
			/* Compute the list of events we're interested in. */
			flags = selflags(ibits, idx, bit);
			if (flags == 0)
				continue;
			error = getselfd_cap(fdp, fd, &fp);
			if (error)
				return (error);
			selfdalloc(td, (void *)(uintptr_t)fd);
			ev = fo_poll(fp, flags, td->td_ucred, td);
			fdrop(fp, td);
			if (ev != 0)
				n += selsetbits(ibits, obits, idx, bit, ev);
		}
	}

	td->td_retval[0] = n;
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct poll_args {
	struct pollfd *fds;
	u_int	nfds;
	int	timeout;
};
#endif
int
sys_poll(struct thread *td, struct poll_args *uap)
{
	struct pollfd *bits;
	struct pollfd smallbits[32];
	struct timeval atv, rtv, ttv;
	int error, timo;
	u_int nfds;
	size_t ni;

	nfds = uap->nfds;
	if (nfds > maxfilesperproc && nfds > FD_SETSIZE)
		return (EINVAL);
	ni = nfds * sizeof(struct pollfd);
	if (ni > sizeof(smallbits))
		bits = malloc(ni, M_TEMP, M_WAITOK);
	else
		bits = smallbits;
	error = copyin(uap->fds, bits, ni);
	if (error)
		goto done;
	if (uap->timeout != INFTIM) {
		atv.tv_sec = uap->timeout / 1000;
		atv.tv_usec = (uap->timeout % 1000) * 1000;
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
	}
	timo = 0;
	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = pollscan(td, bits, nfds);
		if (error || td->td_retval[0] != 0)
			break;
		if (atv.tv_sec || atv.tv_usec) {
			getmicrouptime(&rtv);
			if (timevalcmp(&rtv, &atv, >=))
				break;
			ttv = atv;
			timevalsub(&ttv, &rtv);
			timo = ttv.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tvtohz(&ttv);
		}
		error = seltdwait(td, timo);
		if (error)
			break;
		error = pollrescan(td);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

done:
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0) {
		error = pollout(td, bits, uap->fds, nfds);
		if (error)
			goto out;
	}
out:
	if (ni > sizeof(smallbits))
		free(bits, M_TEMP);
	return (error);
}

static int
pollrescan(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct selinfo *si;
	struct filedesc *fdp;
	struct file *fp;
	struct pollfd *fd;
	int n;

	n = 0;
	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	FILEDESC_SLOCK(fdp);
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (struct pollfd *)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		fp = fdp->fd_ofiles[fd->fd];
#ifdef CAPABILITIES
		if ((fp == NULL)
		    || (cap_funwrap(fp, CAP_POLL_EVENT, &fp) != 0)) {
#else
		if (fp == NULL) {
#endif
			fd->revents = POLLNVAL;
			n++;
			continue;
		}

		/*
		 * Note: backend also returns POLLHUP and
		 * POLLERR if appropriate.
		 */
		fd->revents = fo_poll(fp, fd->events, td->td_ucred, td);
		if (fd->revents != 0)
			n++;
	}
	FILEDESC_SUNLOCK(fdp);
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}

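/*
 * Copy the revents results back to the user's pollfd array and count
 * the descriptors with pending events.
 */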
static int
pollout(struct thread *td, struct pollfd *fds, struct pollfd *ufds, u_int nfd)
{
	int error = 0;
	u_int i = 0;
	u_int n = 0;

	for (i = 0; i < nfd; i++) {
		error = copyout(&fds->revents, &ufds->revents,
		    sizeof(ufds->revents));
		if (error)
			return (error);
		if (fds->revents != 0)
			n++;
		fds++;
		ufds++;
	}
	td->td_retval[0] = n;
	return (0);
}

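/*
 * Perform the initial scan of the pollfd array: validate each fd,
 * preallocate selfds for selrecord(), and collect any events that are
 * already pending.
 */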
static int
pollscan(struct thread *td, struct pollfd *fds, u_int nfd)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	int i;
	struct file *fp;
	int n = 0;

	FILEDESC_SLOCK(fdp);
	for (i = 0; i < nfd; i++, fds++) {
		if (fds->fd >= fdp->fd_nfiles) {
			fds->revents = POLLNVAL;
			n++;
		} else if (fds->fd < 0) {
			fds->revents = 0;
		} else {
			fp = fdp->fd_ofiles[fds->fd];
#ifdef CAPABILITIES
			if ((fp == NULL)
			    || (cap_funwrap(fp, CAP_POLL_EVENT, &fp) != 0)) {
#else
			if (fp == NULL) {
#endif
				fds->revents = POLLNVAL;
				n++;
			} else {
				/*
				 * Note: backend also returns POLLHUP and
				 * POLLERR if appropriate.
				 */
				selfdalloc(td, fds);
				fds->revents = fo_poll(fp, fds->events,
				    td->td_ucred, td);
				/*
				 * POSIX requires POLLOUT to be never
				 * set simultaneously with POLLHUP.
				 */
				if ((fds->revents & POLLHUP) != 0)
					fds->revents &= ~POLLOUT;

				if (fds->revents != 0)
					n++;
			}
		}
	}
	FILEDESC_SUNLOCK(fdp);
	td->td_retval[0] = n;
	return (0);
}

/*
 * OpenBSD poll system call.
 *
 * XXX this isn't quite a true representation..  OpenBSD uses select ops.
 */
#ifndef _SYS_SYSPROTO_H_
struct openbsd_poll_args {
	struct pollfd *fds;
	u_int	nfds;
	int	timeout;
};
#endif
int
sys_openbsd_poll(struct thread *td, struct openbsd_poll_args *uap)
{
	return (sys_poll(td, (struct poll_args *)uap));
}

/*
 * XXX This was created specifically to support netncp and netsmb.  This
 * allows the caller to specify a socket to wait for events on.  It returns
 * 0 if any events matched and an error otherwise.  There is no way to
 * determine which events fired.
 */
int
selsocket(struct socket *so, int events, struct timeval *tvp, struct thread *td)
{
	struct timeval atv, rtv, ttv;
	int error, timo;

	if (tvp != NULL) {
		atv = *tvp;
		if (itimerfix(&atv))
			return (EINVAL);
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
	}

	timo = 0;
	seltdinit(td);
	/*
	 * Iterate until the timeout expires or the socket becomes ready.
	 */
	for (;;) {
		selfdalloc(td, NULL);
		error = sopoll(so, events, NULL, td);
		/* error here is actually the ready events. */
		if (error)
			return (0);
		if (atv.tv_sec || atv.tv_usec) {
			getmicrouptime(&rtv);
			if (timevalcmp(&rtv, &atv, >=)) {
				seltdclear(td);
				return (EWOULDBLOCK);
			}
			ttv = atv;
			timevalsub(&ttv, &rtv);
			timo = ttv.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tvtohz(&ttv);
		}
		error = seltdwait(td, timo);
		seltdclear(td);
		if (error)
			break;
	}
	/* XXX Duplicates ncp/smb behavior. */
	if (error == ERESTART)
		error = 0;
	return (error);
}

/*
 * Preallocate two selfds associated with 'cookie'.  Some fo_poll routines
 * have two select sets, one for read and another for write.
 */
static void
selfdalloc(struct thread *td, void *cookie)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp->st_free1 == NULL)
		stp->st_free1 = uma_zalloc(selfd_zone, M_WAITOK|M_ZERO);
	stp->st_free1->sf_td = stp;
	stp->st_free1->sf_cookie = cookie;
	if (stp->st_free2 == NULL)
		stp->st_free2 = uma_zalloc(selfd_zone, M_WAITOK|M_ZERO);
	stp->st_free2->sf_td = stp;
	stp->st_free2->sf_cookie = cookie;
}

static void
selfdfree(struct seltd *stp, struct selfd *sfp)
{
	STAILQ_REMOVE(&stp->st_selq, sfp, selfd, sf_link);
	mtx_lock(sfp->sf_mtx);
	if (sfp->sf_si)
		TAILQ_REMOVE(&sfp->sf_si->si_tdlist, sfp, sf_threads);
	mtx_unlock(sfp->sf_mtx);
	uma_zfree(selfd_zone, sfp);
}

/*
 * Drain the waiters tied to all the selfds belonging to the specified
 * selinfo.
 */
void
seldrain(struct selinfo *sip)
{

	/*
	 * This functionality is already provided by doselwakeup(), so
	 * calling it is sufficient.  Ultimately the caller should take
	 * care to avoid races between threads calling select()/poll()
	 * and file descriptor detaching, but the races are the same as
	 * for selwakeup().
	 */
	doselwakeup(sip, -1);
}

/*
 * Record a select request.
 */
void
selrecord(struct thread *selector, struct selinfo *sip)
{
	struct selfd *sfp;
	struct seltd *stp;
	struct mtx *mtxp;

	stp = selector->td_sel;
	/*
	 * Don't record when doing a rescan.
	 */
	if (stp->st_flags & SELTD_RESCAN)
		return;
	/*
	 * Grab one of the preallocated descriptors.
	 */
	sfp = NULL;
	if ((sfp = stp->st_free1) != NULL)
		stp->st_free1 = NULL;
	else if ((sfp = stp->st_free2) != NULL)
		stp->st_free2 = NULL;
	else
		panic("selrecord: No free selfd on selq");
	mtxp = sip->si_mtx;
	if (mtxp == NULL)
		mtxp = mtx_pool_find(mtxpool_select, sip);
	/*
	 * Initialize the sfp and queue it in the thread.
	 */
	sfp->sf_si = sip;
	sfp->sf_mtx = mtxp;
	STAILQ_INSERT_TAIL(&stp->st_selq, sfp, sf_link);
	/*
	 * Now that we've locked the sip, check for initialization.
	 */
	mtx_lock(mtxp);
	if (sip->si_mtx == NULL) {
		sip->si_mtx = mtxp;
		TAILQ_INIT(&sip->si_tdlist);
	}
	/*
	 * Add this thread to the list of selfds listening on this selinfo.
	 */
	TAILQ_INSERT_TAIL(&sip->si_tdlist, sfp, sf_threads);
	mtx_unlock(sip->si_mtx);
}

/* Wake up a selecting thread. */
void
selwakeup(struct selinfo *sip)
{
	doselwakeup(sip, -1);
}

/* Wake up a selecting thread, and set its priority. */
void
selwakeuppri(struct selinfo *sip, int pri)
{
	doselwakeup(sip, pri);
}

/*
 * Do a wakeup when a selectable event occurs.
 */
static void
doselwakeup(struct selinfo *sip, int pri)
{
	struct selfd *sfp;
	struct selfd *sfn;
	struct seltd *stp;

	/* If it's not initialized there can't be any waiters. */
	if (sip->si_mtx == NULL)
		return;
	/*
	 * Locking the selinfo locks all selfds associated with it.
	 */
	mtx_lock(sip->si_mtx);
	TAILQ_FOREACH_SAFE(sfp, &sip->si_tdlist, sf_threads, sfn) {
		/*
		 * Once we remove this sfp from the list and clear the
		 * sf_si field, seltdclear() will know to ignore this si.
		 */
		TAILQ_REMOVE(&sip->si_tdlist, sfp, sf_threads);
		sfp->sf_si = NULL;
		stp = sfp->sf_td;
		mtx_lock(&stp->st_mtx);
		stp->st_flags |= SELTD_PENDING;
		cv_broadcastpri(&stp->st_wait, pri);
		mtx_unlock(&stp->st_mtx);
	}
	mtx_unlock(sip->si_mtx);
}

static void
seltdinit(struct thread *td)
{
	struct seltd *stp;

	if ((stp = td->td_sel) != NULL)
		goto out;
	td->td_sel = stp = malloc(sizeof(*stp), M_SELECT, M_WAITOK|M_ZERO);
	mtx_init(&stp->st_mtx, "sellck", NULL, MTX_DEF);
	cv_init(&stp->st_wait, "select");
out:
	stp->st_flags = 0;
	STAILQ_INIT(&stp->st_selq);
}

static int
seltdwait(struct thread *td, int timo)
{
	struct seltd *stp;
	int error;

	stp = td->td_sel;
	/*
	 * An event of interest may occur while we do not hold the seltd
	 * locked, so check the pending flag before we sleep.
	 */
	mtx_lock(&stp->st_mtx);
	/*
	 * Any further calls to selrecord will be a rescan.
	 */
	stp->st_flags |= SELTD_RESCAN;
	if (stp->st_flags & SELTD_PENDING) {
		mtx_unlock(&stp->st_mtx);
		return (0);
	}
	if (timo > 0)
		error = cv_timedwait_sig(&stp->st_wait, &stp->st_mtx, timo);
	else
		error = cv_wait_sig(&stp->st_wait, &stp->st_mtx);
	mtx_unlock(&stp->st_mtx);

	return (error);
}

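/*
 * Free a thread's cached select(2) state: the preallocated selfds and
 * the seltd itself.
 */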
void
seltdfini(struct thread *td)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp == NULL)
		return;
	if (stp->st_free1)
		uma_zfree(selfd_zone, stp->st_free1);
	if (stp->st_free2)
		uma_zfree(selfd_zone, stp->st_free2);
	td->td_sel = NULL;
	free(stp, M_SELECT);
}

/*
 * Remove the references to the thread from all of the objects we were
 * polling.
 */
static void
seltdclear(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;

	stp = td->td_sel;
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn)
		selfdfree(stp, sfp);
	stp->st_flags = 0;
}

static void selectinit(void *);
SYSINIT(select, SI_SUB_SYSCALLS, SI_ORDER_ANY, selectinit, NULL);
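/* Set up the selfd UMA zone and the shared select mutex pool at boot. */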
static void
selectinit(void *dummy __unused)
{

	selfd_zone = uma_zcreate("selfd", sizeof(struct selfd), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtxpool_select = mtx_pool_create("select mtxpool", 128, MTX_DEF);
}