/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_generic.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.1/sys/kern/sys_generic.c 268338 2014-07-06 22:54:17Z mjg $");

#include "opt_capsicum.h"
#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/capability.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/socketvar.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/resourcevar.h>
#include <sys/selinfo.h>
#include <sys/sleepqueue.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <security/audit/audit.h>

int iosize_max_clamp = 1;
SYSCTL_INT(_debug, OID_AUTO, iosize_max_clamp, CTLFLAG_RW,
    &iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX");
int devfs_iosize_max_clamp = 1;
SYSCTL_INT(_debug, OID_AUTO, devfs_iosize_max_clamp, CTLFLAG_RW,
    &devfs_iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX for devices");

/*
 * Assert that the return value of read(2) and write(2) syscalls fits
 * into a register.  If not, an architecture will need to provide the
 * usermode wrappers to reconstruct the result.
 */
CTASSERT(sizeof(register_t) >= sizeof(size_t));
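
/*
 * Illustrative note (an editor's sketch, not from the original file):
 * on the platforms this file targets, register_t and size_t have the
 * same width (both 32-bit or both 64-bit), so the assertion holds.  A
 * hypothetical ABI with 32-bit registers and a 64-bit size_t would
 * fail it at compile time, i.e. the equivalent of
 *
 *	CTASSERT(sizeof(int32_t) >= sizeof(int64_t));
 *
 * and such an architecture would have to ship usermode wrappers that
 * reassemble the full ssize_t result, as the comment above says.
 */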

static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
MALLOC_DEFINE(M_IOV, "iov", "large iov's");

static int	pollout(struct thread *, struct pollfd *, struct pollfd *,
		    u_int);
static int	pollscan(struct thread *, struct pollfd *, u_int);
static int	pollrescan(struct thread *);
static int	selscan(struct thread *, fd_mask **, fd_mask **, int);
static int	selrescan(struct thread *, fd_mask **, fd_mask **);
static void	selfdalloc(struct thread *, void *);
static void	selfdfree(struct seltd *, struct selfd *);
static int	dofileread(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static int	dofilewrite(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static void	doselwakeup(struct selinfo *, int);
static void	seltdinit(struct thread *);
static int	seltdwait(struct thread *, sbintime_t, sbintime_t);
static void	seltdclear(struct thread *);

/*
 * One seltd per-thread allocated on demand as needed.
 *
 *	t - protected by st_mtx
 *	k - Only accessed by curthread or read-only
 */
struct seltd {
	STAILQ_HEAD(, selfd)	st_selq;	/* (k) List of selfds. */
	struct selfd		*st_free1;	/* (k) free fd for read set. */
	struct selfd		*st_free2;	/* (k) free fd for write set. */
	struct mtx		st_mtx;		/* Protects struct seltd */
	struct cv		st_wait;	/* (t) Wait channel. */
	int			st_flags;	/* (t) SELTD_ flags. */
};

#define	SELTD_PENDING	0x0001			/* We have pending events. */
#define	SELTD_RESCAN	0x0002			/* Doing a rescan. */

/*
 * One selfd allocated per-thread per-file-descriptor.
 *	f - protected by sf_mtx
 */
struct selfd {
	STAILQ_ENTRY(selfd)	sf_link;	/* (k) fds owned by this td. */
	TAILQ_ENTRY(selfd)	sf_threads;	/* (f) fds on this selinfo. */
	struct selinfo		*sf_si;		/* (f) selinfo when linked. */
	struct mtx		*sf_mtx;	/* Pointer to selinfo mtx. */
	struct seltd		*sf_td;		/* (k) owning seltd. */
	void			*sf_cookie;	/* (k) fd or pollfd. */
};

static uma_zone_t selfd_zone;
static struct mtx_pool *mtxpool_select;

#ifndef _SYS_SYSPROTO_H_
struct read_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
};
#endif
int
sys_read(struct thread *td, struct read_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_readv(td, uap->fd, &auio);
	return (error);
}
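
/*
 * Usage sketch (illustrative only, not part of the original file):
 * userland reaches sys_read() through the libc wrapper, which packs
 * the arguments into struct read_args:
 *
 *	char buf[512];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	if (n == -1)
 *		err(1, "read");
 *
 * A request larger than IOSIZE_MAX fails with EINVAL up front, before
 * any descriptor lookup, as the guard at the top of sys_read() shows.
 */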

/*
 * Positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pread_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
	int	pad;
	off_t	offset;
};
#endif
int
sys_pread(struct thread *td, struct pread_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_preadv(td, uap->fd, &auio, uap->offset);
	return (error);
}

int
freebsd6_pread(struct thread *td, struct freebsd6_pread_args *uap)
{
	struct pread_args oargs;

	oargs.fd = uap->fd;
	oargs.buf = uap->buf;
	oargs.nbyte = uap->nbyte;
	oargs.offset = uap->offset;
	return (sys_pread(td, &oargs));
}

/*
 * Scatter read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct readv_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_readv(struct thread *td, struct readv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_readv(td, uap->fd, auio);
	free(auio, M_IOV);
	return (error);
}

int
kern_readv(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	cap_rights_t rights;
	int error;

	error = fget_read(td, fd, cap_rights_init(&rights, CAP_READ), &fp);
	if (error)
		return (error);
	error = dofileread(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}

/*
 * Scatter positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct preadv_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_preadv(struct thread *td, struct preadv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_preadv(td, uap->fd, auio, uap->offset);
	free(auio, M_IOV);
	return (error);
}

int
kern_preadv(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	cap_rights_t rights;
	int error;

	error = fget_read(td, fd, cap_rights_init(&rights, CAP_PREAD), &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 && fp->f_vnode->v_type != VCHR)
		error = EINVAL;
	else
		error = dofileread(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}
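
/*
 * Illustrative consequence of the checks above (an editor's sketch):
 * positioned I/O requires a seekable descriptor, so pread(2) on a pipe
 * fails with ESPIPE while the same call on a regular file succeeds:
 *
 *	int fds[2];
 *	char c;
 *	pipe(fds);
 *	pread(fds[0], &c, 1, 0);	(fails: errno == ESPIPE)
 *
 * Negative offsets are rejected with EINVAL except on character
 * devices, whose drivers define their own offset semantics.
 */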

/*
 * Common code for readv and preadv that reads data in
 * from a file using the passed in uio, offset, and flags.
 */
static int
dofileread(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	/* Finish zero length reads right here */
	if (auio->uio_resid == 0) {
		td->td_retval[0] = 0;
		return (0);
	}
	auio->uio_rw = UIO_READ;
	auio->uio_offset = offset;
	auio->uio_td = td;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
	if ((error = fo_read(fp, auio, td->td_ucred, flags, td))) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_READ, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct write_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
};
#endif
int
sys_write(struct thread *td, struct write_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_writev(td, uap->fd, &auio);
	return (error);
}

/*
 * Positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwrite_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
	int	pad;
	off_t	offset;
};
#endif
int
sys_pwrite(struct thread *td, struct pwrite_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_pwritev(td, uap->fd, &auio, uap->offset);
	return (error);
}

int
freebsd6_pwrite(struct thread *td, struct freebsd6_pwrite_args *uap)
{
	struct pwrite_args oargs;

	oargs.fd = uap->fd;
	oargs.buf = uap->buf;
	oargs.nbyte = uap->nbyte;
	oargs.offset = uap->offset;
	return (sys_pwrite(td, &oargs));
}

/*
 * Gather write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct writev_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_writev(struct thread *td, struct writev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_writev(td, uap->fd, auio);
	free(auio, M_IOV);
	return (error);
}
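
/*
 * Usage sketch (illustrative only): a gather write hands the kernel an
 * array of iovecs; copyinuio() validates iovcnt and copies the array
 * in, then kern_writev() performs the write:
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = "hello ", .iov_len = 6 },
 *		{ .iov_base = "world\n", .iov_len = 6 },
 *	};
 *	ssize_t n = writev(fd, iov, 2);
 *
 * Note that the uio allocated by copyinuio() is freed here whether or
 * not kern_writev() succeeds.
 */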

int
kern_writev(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	cap_rights_t rights;
	int error;

	error = fget_write(td, fd, cap_rights_init(&rights, CAP_WRITE), &fp);
	if (error)
		return (error);
	error = dofilewrite(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}

/*
 * Gather positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwritev_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_pwritev(struct thread *td, struct pwritev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_pwritev(td, uap->fd, auio, uap->offset);
	free(auio, M_IOV);
	return (error);
}

int
kern_pwritev(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	cap_rights_t rights;
	int error;

	error = fget_write(td, fd, cap_rights_init(&rights, CAP_PWRITE), &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 && fp->f_vnode->v_type != VCHR)
		error = EINVAL;
	else
		error = dofilewrite(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}

/*
 * Common code for writev and pwritev that writes data to
 * a file using the passed in uio, offset, and flags.
 */
static int
dofilewrite(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	auio->uio_rw = UIO_WRITE;
	auio->uio_td = td;
	auio->uio_offset = offset;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
	if (fp->f_type == DTYPE_VNODE &&
	    (fp->f_vnread_flags & FDEVFS_VNODE) == 0)
		bwillwrite();
	if ((error = fo_write(fp, auio, td->td_ucred, flags, td))) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		/* Socket layer is responsible for issuing SIGPIPE. */
		if (fp->f_type != DTYPE_SOCKET && error == EPIPE) {
			PROC_LOCK(td->td_proc);
			tdsignal(td, SIGPIPE);
			PROC_UNLOCK(td->td_proc);
		}
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_WRITE, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}

/*
 * Truncate a file given a file descriptor.
 *
 * Can't use fget_write() here, since must return EINVAL and not EBADF if the
 * descriptor isn't writable.
 */
int
kern_ftruncate(struct thread *td, int fd, off_t length)
{
	struct file *fp;
	cap_rights_t rights;
	int error;

	AUDIT_ARG_FD(fd);
	if (length < 0)
		return (EINVAL);
	error = fget(td, fd, cap_rights_init(&rights, CAP_FTRUNCATE), &fp);
	if (error)
		return (error);
	AUDIT_ARG_FILE(td->td_proc, fp);
	if (!(fp->f_flag & FWRITE)) {
		fdrop(fp, td);
		return (EINVAL);
	}
	error = fo_truncate(fp, length, td->td_ucred, td);
	fdrop(fp, td);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ftruncate_args {
	int	fd;
	int	pad;
	off_t	length;
};
#endif
int
sys_ftruncate(struct thread *td, struct ftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct oftruncate_args {
	int	fd;
	long	length;
};
#endif
int
oftruncate(struct thread *td, struct oftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct ioctl_args {
	int	fd;
	u_long	com;
	caddr_t	data;
};
#endif
/* ARGSUSED */
int
sys_ioctl(struct thread *td, struct ioctl_args *uap)
{
	u_long com;
	int arg, error;
	u_int size;
	caddr_t data;

	if (uap->com > 0xffffffff) {
		printf(
		    "WARNING pid %d (%s): ioctl sign-extension ioctl %lx\n",
		    td->td_proc->p_pid, td->td_name, uap->com);
		uap->com &= 0xffffffff;
	}
	com = uap->com;

	/*
	 * Interpret high order word to find amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if ((size > IOCPARM_MAX) ||
	    ((com & (IOC_VOID | IOC_IN | IOC_OUT)) == 0) ||
#if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	    ((com & IOC_OUT) && size == 0) ||
#else
	    ((com & (IOC_IN | IOC_OUT)) && size == 0) ||
#endif
	    ((com & IOC_VOID) && size > 0 && size != sizeof(int)))
		return (ENOTTY);

	if (size > 0) {
		if (com & IOC_VOID) {
			/* Integer argument. */
			arg = (intptr_t)uap->data;
			data = (void *)&arg;
			size = 0;
		} else
			data = malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
	} else
		data = (void *)&uap->data;
	if (com & IOC_IN) {
		error = copyin(uap->data, data, (u_int)size);
		if (error) {
			if (size > 0)
				free(data, M_IOCTLOPS);
			return (error);
		}
	} else if (com & IOC_OUT) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(data, size);
	}

	error = kern_ioctl(td, uap->fd, com, data);

	if (error == 0 && (com & IOC_OUT))
		error = copyout(data, uap->data, (u_int)size);

	if (size > 0)
		free(data, M_IOCTLOPS);
	return (error);
}
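
/*
 * Illustrative note on the command encoding decoded above (an editor's
 * sketch; the macros live in <sys/ioccom.h>): the high word of an
 * ioctl command carries the direction bits and the argument size
 * extracted by IOCPARM_LEN().  For example, <sys/filio.h> defines
 *
 *	#define FIONREAD _IOR('f', 127, int)
 *
 * which sets IOC_OUT and a size of sizeof(int), so sys_ioctl()
 * allocates an int-sized kernel buffer, zeroes it, calls kern_ioctl(),
 * and copies the result back out to uap->data on success.
 */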

int
kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
{
	struct file *fp;
	struct filedesc *fdp;
#ifndef CAPABILITIES
	cap_rights_t rights;
#endif
	int error, tmp, locked;

	AUDIT_ARG_FD(fd);
	AUDIT_ARG_CMD(com);

	fdp = td->td_proc->p_fd;

	switch (com) {
	case FIONCLEX:
	case FIOCLEX:
		FILEDESC_XLOCK(fdp);
		locked = LA_XLOCKED;
		break;
	default:
#ifdef CAPABILITIES
		FILEDESC_SLOCK(fdp);
		locked = LA_SLOCKED;
#else
		locked = LA_UNLOCKED;
#endif
		break;
	}

#ifdef CAPABILITIES
	if ((fp = fget_locked(fdp, fd)) == NULL) {
		error = EBADF;
		goto out;
	}
	if ((error = cap_ioctl_check(fdp, fd, com)) != 0) {
		fp = NULL;	/* fhold() was not called yet */
		goto out;
	}
	fhold(fp);
	if (locked == LA_SLOCKED) {
		FILEDESC_SUNLOCK(fdp);
		locked = LA_UNLOCKED;
	}
#else
	error = fget(td, fd, cap_rights_init(&rights, CAP_IOCTL), &fp);
	if (error != 0) {
		fp = NULL;
		goto out;
	}
#endif
	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		error = EBADF;
		goto out;
	}

	switch (com) {
	case FIONCLEX:
		fdp->fd_ofiles[fd].fde_flags &= ~UF_EXCLOSE;
		goto out;
	case FIOCLEX:
		fdp->fd_ofiles[fd].fde_flags |= UF_EXCLOSE;
		goto out;
	case FIONBIO:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FNONBLOCK);
		else
			atomic_clear_int(&fp->f_flag, FNONBLOCK);
		data = (void *)&tmp;
		break;
	case FIOASYNC:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FASYNC);
		else
			atomic_clear_int(&fp->f_flag, FASYNC);
		data = (void *)&tmp;
		break;
	}

	error = fo_ioctl(fp, com, data, td->td_ucred, td);
out:
	switch (locked) {
	case LA_XLOCKED:
		FILEDESC_XUNLOCK(fdp);
		break;
#ifdef CAPABILITIES
	case LA_SLOCKED:
		FILEDESC_SUNLOCK(fdp);
		break;
#endif
	default:
		FILEDESC_UNLOCK_ASSERT(fdp);
		break;
	}
	if (fp != NULL)
		fdrop(fp, td);
	return (error);
}

int
poll_no_poll(int events)
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

int
sys_pselect(struct thread *td, struct pselect_args *uap)
{
	struct timespec ts;
	struct timeval tv, *tvp;
	sigset_t set, *uset;
	int error;

	if (uap->ts != NULL) {
		error = copyin(uap->ts, &ts, sizeof(ts));
		if (error != 0)
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		tvp = &tv;
	} else
		tvp = NULL;
	if (uap->sm != NULL) {
		error = copyin(uap->sm, &set, sizeof(set));
		if (error != 0)
			return (error);
		uset = &set;
	} else
		uset = NULL;
	return (kern_pselect(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    uset, NFDBITS));
}

int
kern_pselect(struct thread *td, int nd, fd_set *in, fd_set *ou, fd_set *ex,
    struct timeval *tvp, sigset_t *uset, int abi_nfdbits)
{
	int error;

	if (uset != NULL) {
		error = kern_sigprocmask(td, SIG_SETMASK, uset,
		    &td->td_oldsigmask, 0);
		if (error != 0)
			return (error);
		td->td_pflags |= TDP_OLDMASK;
		/*
		 * Make sure that ast() is called on return to
		 * usermode and TDP_OLDMASK is cleared, restoring old
		 * sigmask.
		 */
		thread_lock(td);
		td->td_flags |= TDF_ASTPENDING;
		thread_unlock(td);
	}
	error = kern_select(td, nd, in, ou, ex, tvp, abi_nfdbits);
	return (error);
}
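
/*
 * Usage sketch (illustrative only): pselect(2) exists so the signal
 * mask swap and the descriptor wait are atomic with respect to signal
 * delivery, closing the classic select(2) race:
 *
 *	sigset_t mask;
 *	sigemptyset(&mask);		(unblock everything while waiting)
 *	ret = pselect(nfds, &rfds, NULL, NULL, &ts, &mask);
 *
 * kern_pselect() installs the temporary mask, sets TDP_OLDMASK, and
 * forces ast() to run on return to usermode so the old mask is
 * restored there; a signal arriving during kern_select() therefore
 * wakes the sleeping thread instead of being lost.
 */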

#ifndef _SYS_SYSPROTO_H_
struct select_args {
	int	nd;
	fd_set	*in, *ou, *ex;
	struct timeval *tv;
};
#endif
int
sys_select(struct thread *td, struct select_args *uap)
{
	struct timeval tv, *tvp;
	int error;

	if (uap->tv != NULL) {
		error = copyin(uap->tv, &tv, sizeof(tv));
		if (error)
			return (error);
		tvp = &tv;
	} else
		tvp = NULL;

	return (kern_select(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    NFDBITS));
}
/*
 * In the unlikely case when the user specified n greater than the last
 * open file descriptor, check that no bits are set after the last
 * valid fd.  We must return EBADF if any is set.
 *
 * There are applications that rely on this behaviour.
 *
 * nd is fd_lastfile + 1.
 */
static int
select_check_badfd(fd_set *fd_in, int nd, int ndu, int abi_nfdbits)
{
	char *addr, *oaddr;
	int b, i, res;
	uint8_t bits;

	if (nd >= ndu || fd_in == NULL)
		return (0);

	oaddr = NULL;
	bits = 0;	/* silence gcc */
	for (i = nd; i < ndu; i++) {
		b = i / NBBY;
#if BYTE_ORDER == LITTLE_ENDIAN
		addr = (char *)fd_in + b;
#else
		addr = (char *)fd_in;
		if (abi_nfdbits == NFDBITS) {
			addr += rounddown(b, sizeof(fd_mask)) +
			    sizeof(fd_mask) - 1 - b % sizeof(fd_mask);
		} else {
			addr += rounddown(b, sizeof(uint32_t)) +
			    sizeof(uint32_t) - 1 - b % sizeof(uint32_t);
		}
#endif
		if (addr != oaddr) {
			res = fubyte(addr);
			if (res == -1)
				return (EFAULT);
			oaddr = addr;
			bits = res;
		}
		if ((bits & (1 << (i % NBBY))) != 0)
			return (EBADF);
	}
	return (0);
}
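
/*
 * Illustrative example of the behaviour preserved above (an editor's
 * sketch): if a process whose highest open descriptor is 5 calls
 *
 *	FD_ZERO(&rfds);
 *	FD_SET(200, &rfds);
 *	select(201, &rfds, NULL, NULL, NULL);
 *
 * the call must fail with EBADF even though fd 200 lies beyond
 * fd_lastfile.  The loop therefore probes the user bitmap byte by byte
 * with fubyte() instead of copying the whole oversized set in.
 */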

int
kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou,
    fd_set *fd_ex, struct timeval *tvp, int abi_nfdbits)
{
	struct filedesc *fdp;
	/*
	 * The magic 2048 here is chosen to be just enough for FD_SETSIZE
	 * infds with the new FD_SETSIZE of 1024, and more than enough for
	 * FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE
	 * of 256.
	 */
	fd_mask s_selbits[howmany(2048, NFDBITS)];
	fd_mask *ibits[3], *obits[3], *selbits, *sbp;
	struct timeval rtv;
	sbintime_t asbt, precision, rsbt;
	u_int nbufbytes, ncpbytes, ncpubytes, nfdbits;
	int error, lf, ndu;

	if (nd < 0)
		return (EINVAL);
	fdp = td->td_proc->p_fd;
	ndu = nd;
	lf = fdp->fd_lastfile;
	if (nd > lf + 1)
		nd = lf + 1;

	error = select_check_badfd(fd_in, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ou, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ex, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);

	/*
	 * Allocate just enough bits for the non-null fd_sets.  Use the
	 * preallocated auto buffer if possible.
	 */
	nfdbits = roundup(nd, NFDBITS);
	ncpbytes = nfdbits / NBBY;
	ncpubytes = roundup(nd, abi_nfdbits) / NBBY;
	nbufbytes = 0;
	if (fd_in != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ou != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ex != NULL)
		nbufbytes += 2 * ncpbytes;
	if (nbufbytes <= sizeof s_selbits)
		selbits = &s_selbits[0];
	else
		selbits = malloc(nbufbytes, M_SELECT, M_WAITOK);

	/*
	 * Assign pointers into the bit buffers and fetch the input bits.
	 * Put the output buffers together so that they can be bzeroed
	 * together.
	 */
	sbp = selbits;
#define	getbits(name, x) \
	do {								\
		if (name == NULL) {					\
			ibits[x] = NULL;				\
			obits[x] = NULL;				\
		} else {						\
			ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp;	\
			obits[x] = sbp;					\
			sbp += ncpbytes / sizeof *sbp;			\
			error = copyin(name, ibits[x], ncpubytes);	\
			if (error != 0)					\
				goto done;				\
			bzero((char *)ibits[x] + ncpubytes,		\
			    ncpbytes - ncpubytes);			\
		}							\
	} while (0)
	getbits(fd_in, 0);
	getbits(fd_ou, 1);
	getbits(fd_ex, 2);
#undef	getbits

#if BYTE_ORDER == BIG_ENDIAN && defined(__LP64__)
	/*
	 * XXX: swizzle_fdset assumes that if abi_nfdbits != NFDBITS,
	 * we are running under 32-bit emulation.  This should be more
	 * generic.
	 */
#define	swizzle_fdset(bits)						\
	if (abi_nfdbits != NFDBITS && bits != NULL) {			\
		int i;							\
		for (i = 0; i < ncpbytes / sizeof *sbp; i++)		\
			bits[i] = (bits[i] >> 32) | (bits[i] << 32);	\
	}
#else
#define	swizzle_fdset(bits)
#endif

	/* Make sure the bit order makes it through an ABI transition */
	swizzle_fdset(ibits[0]);
	swizzle_fdset(ibits[1]);
	swizzle_fdset(ibits[2]);

	if (nbufbytes != 0)
		bzero(selbits, nbufbytes / 2);

	precision = 0;
	if (tvp != NULL) {
		rtv = *tvp;
		if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
		    rtv.tv_usec >= 1000000) {
			error = EINVAL;
			goto done;
		}
		if (!timevalisset(&rtv))
			asbt = 0;
		else if (rtv.tv_sec <= INT32_MAX) {
			rsbt = tvtosbt(rtv);
			precision = rsbt;
			precision >>= tc_precexp;
			if (TIMESEL(&asbt, rsbt))
				asbt += tc_tick_sbt;
			if (asbt <= INT64_MAX - rsbt)
				asbt += rsbt;
			else
				asbt = -1;
		} else
			asbt = -1;
	} else
		asbt = -1;
	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = selscan(td, ibits, obits, nd);
		if (error || td->td_retval[0] != 0)
			break;
		error = seltdwait(td, asbt, precision);
		if (error)
			break;
		error = selrescan(td, ibits, obits);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

done:
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	/* swizzle bit order back, if necessary */
	swizzle_fdset(obits[0]);
	swizzle_fdset(obits[1]);
	swizzle_fdset(obits[2]);
#undef	swizzle_fdset

#define	putbits(name, x) \
	if (name && (error2 = copyout(obits[x], name, ncpubytes))) \
		error = error2;
	if (error == 0) {
		int error2;

		putbits(fd_in, 0);
		putbits(fd_ou, 1);
		putbits(fd_ex, 2);
#undef putbits
	}
	if (selbits != &s_selbits[0])
		free(selbits, M_SELECT);

	return (error);
}
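
/*
 * Illustrative note on the timeout handling above (an editor's sketch):
 * the timeval is converted once into an absolute sbintime_t deadline,
 * so the scan/wait/rescan loop does not drift no matter how many times
 * it iterates.  A userland call such as
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *	select(nfds, &rfds, NULL, NULL, &tv);
 *
 * becomes asbt = now + tvtosbt(tv), with asbt == -1 meaning "sleep
 * forever" and asbt == 0 meaning "poll once and return".
 */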

/*
 * Convert a select bit set to poll flags.
 *
 * The backend always returns POLLHUP/POLLERR if appropriate and we
 * return this as a set bit in any set.
 */
static int select_flags[3] = {
    POLLRDNORM | POLLHUP | POLLERR,
    POLLWRNORM | POLLHUP | POLLERR,
    POLLRDBAND | POLLERR
};

/*
 * Compute the fo_poll flags required for a fd given by the index and
 * bit position in the fd_mask array.
 */
static __inline int
selflags(fd_mask **ibits, int idx, fd_mask bit)
{
	int flags;
	int msk;

	flags = 0;
	for (msk = 0; msk < 3; msk++) {
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		flags |= select_flags[msk];
	}
	return (flags);
}

/*
 * Set the appropriate output bits given a mask of fired events and the
 * input bits originally requested.
 */
static __inline int
selsetbits(fd_mask **ibits, fd_mask **obits, int idx, fd_mask bit, int events)
{
	int msk;
	int n;

	n = 0;
	for (msk = 0; msk < 3; msk++) {
		if ((events & select_flags[msk]) == 0)
			continue;
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		/*
		 * XXX Check for a duplicate set.  This can occur because a
		 * socket calls selrecord() twice for each poll() call
		 * resulting in two selfds per real fd.  selrescan() will
		 * call selsetbits twice as a result.
		 */
		if ((obits[msk][idx] & bit) != 0)
			continue;
		obits[msk][idx] |= bit;
		n++;
	}

	return (n);
}

static __inline int
getselfd_cap(struct filedesc *fdp, int fd, struct file **fpp)
{
	cap_rights_t rights;

	cap_rights_init(&rights, CAP_EVENT);

	return (fget_unlocked(fdp, fd, &rights, 0, fpp, NULL));
}

/*
 * Traverse the list of fds attached to this thread's seltd and check for
 * completion.
 */
static int
selrescan(struct thread *td, fd_mask **ibits, fd_mask **obits)
{
	struct filedesc *fdp;
	struct selinfo *si;
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct file *fp;
	fd_mask bit;
	int fd, ev, n, idx;
	int error;

	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	n = 0;
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (int)(uintptr_t)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		error = getselfd_cap(fdp, fd, &fp);
		if (error)
			return (error);
		idx = fd / NFDBITS;
		bit = (fd_mask)1 << (fd % NFDBITS);
		ev = fo_poll(fp, selflags(ibits, idx, bit), td->td_ucred, td);
		fdrop(fp, td);
		if (ev != 0)
			n += selsetbits(ibits, obits, idx, bit, ev);
	}
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}

/*
 * Perform the initial filedescriptor scan and register ourselves with
 * each selinfo.
 */
static int
selscan(struct thread *td, fd_mask **ibits, fd_mask **obits, int nfd)
{
	struct filedesc *fdp;
	struct file *fp;
	fd_mask bit;
	int ev, flags, end, fd;
	int n, idx;
	int error;

	fdp = td->td_proc->p_fd;
	n = 0;
	for (idx = 0, fd = 0; fd < nfd; idx++) {
		end = imin(fd + NFDBITS, nfd);
		for (bit = 1; fd < end; bit <<= 1, fd++) {
			/* Compute the list of events we're interested in. */
			flags = selflags(ibits, idx, bit);
			if (flags == 0)
				continue;
			error = getselfd_cap(fdp, fd, &fp);
			if (error)
				return (error);
			selfdalloc(td, (void *)(uintptr_t)fd);
			ev = fo_poll(fp, flags, td->td_ucred, td);
			fdrop(fp, td);
			if (ev != 0)
				n += selsetbits(ibits, obits, idx, bit, ev);
		}
	}

	td->td_retval[0] = n;
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct poll_args {
	struct pollfd *fds;
	u_int	nfds;
	int	timeout;
};
#endif
int
sys_poll(struct thread *td, struct poll_args *uap)
{
	struct pollfd *bits;
	struct pollfd smallbits[32];
	sbintime_t asbt, precision, rsbt;
	u_int nfds;
	int error;
	size_t ni;

	nfds = uap->nfds;
	if (nfds > maxfilesperproc && nfds > FD_SETSIZE)
		return (EINVAL);
	ni = nfds * sizeof(struct pollfd);
	if (ni > sizeof(smallbits))
		bits = malloc(ni, M_TEMP, M_WAITOK);
	else
		bits = smallbits;
	error = copyin(uap->fds, bits, ni);
	if (error)
		goto done;
	precision = 0;
	if (uap->timeout != INFTIM) {
		if (uap->timeout < 0) {
			error = EINVAL;
			goto done;
		}
		if (uap->timeout == 0)
			asbt = 0;
		else {
			rsbt = SBT_1MS * uap->timeout;
			precision = rsbt;
			precision >>= tc_precexp;
			if (TIMESEL(&asbt, rsbt))
				asbt += tc_tick_sbt;
			asbt += rsbt;
		}
	} else
		asbt = -1;
	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = pollscan(td, bits, nfds);
		if (error || td->td_retval[0] != 0)
			break;
		error = seltdwait(td, asbt, precision);
		if (error)
			break;
		error = pollrescan(td);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

done:
	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;
	if (error == 0) {
		error = pollout(td, bits, uap->fds, nfds);
		if (error)
			goto out;
	}
out:
	if (ni > sizeof(smallbits))
		free(bits, M_TEMP);
	return (error);
}
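
/*
 * Usage sketch (illustrative only): poll(2) drives the same
 * scan/wait/rescan loop as select(2) but takes per-descriptor event
 * masks instead of fd_set bitmaps:
 *
 *	struct pollfd pfd = { .fd = sock, .events = POLLIN };
 *	int n = poll(&pfd, 1, 3000);	(millisecond timeout)
 *	if (n > 0 && (pfd.revents & POLLIN))
 *		... data is ready ...
 *
 * A timeout of INFTIM (-1) maps to asbt = -1, i.e. wait forever, and
 * 0 performs a single non-blocking scan.
 */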

static int
pollrescan(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct selinfo *si;
	struct filedesc *fdp;
	struct file *fp;
	struct pollfd *fd;
#ifdef CAPABILITIES
	cap_rights_t rights;
#endif
	int n;

	n = 0;
	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	FILEDESC_SLOCK(fdp);
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (struct pollfd *)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		fp = fdp->fd_ofiles[fd->fd].fde_file;
#ifdef CAPABILITIES
		if (fp == NULL ||
		    cap_check(cap_rights(fdp, fd->fd),
		    cap_rights_init(&rights, CAP_EVENT)) != 0)
#else
		if (fp == NULL)
#endif
		{
			fd->revents = POLLNVAL;
			n++;
			continue;
		}

		/*
		 * Note: backend also returns POLLHUP and
		 * POLLERR if appropriate.
		 */
		fd->revents = fo_poll(fp, fd->events, td->td_ucred, td);
		if (fd->revents != 0)
			n++;
	}
	FILEDESC_SUNLOCK(fdp);
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}

static int
pollout(struct thread *td, struct pollfd *fds, struct pollfd *ufds, u_int nfd)
{
	int error = 0;
	u_int i = 0;
	u_int n = 0;

	for (i = 0; i < nfd; i++) {
		error = copyout(&fds->revents, &ufds->revents,
		    sizeof(ufds->revents));
		if (error)
			return (error);
		if (fds->revents != 0)
			n++;
		fds++;
		ufds++;
	}
	td->td_retval[0] = n;
	return (0);
}

static int
pollscan(struct thread *td, struct pollfd *fds, u_int nfd)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *fp;
#ifdef CAPABILITIES
	cap_rights_t rights;
#endif
	int i, n = 0;

	FILEDESC_SLOCK(fdp);
	for (i = 0; i < nfd; i++, fds++) {
		if (fds->fd > fdp->fd_lastfile) {
			fds->revents = POLLNVAL;
			n++;
		} else if (fds->fd < 0) {
			fds->revents = 0;
		} else {
			fp = fdp->fd_ofiles[fds->fd].fde_file;
#ifdef CAPABILITIES
			if (fp == NULL ||
			    cap_check(cap_rights(fdp, fds->fd),
			    cap_rights_init(&rights, CAP_EVENT)) != 0)
#else
			if (fp == NULL)
#endif
			{
				fds->revents = POLLNVAL;
				n++;
			} else {
				/*
				 * Note: backend also returns POLLHUP and
				 * POLLERR if appropriate.
				 */
				selfdalloc(td, fds);
				fds->revents = fo_poll(fp, fds->events,
				    td->td_ucred, td);
				/*
				 * POSIX requires POLLOUT to be never
				 * set simultaneously with POLLHUP.
				 */
				if ((fds->revents & POLLHUP) != 0)
					fds->revents &= ~POLLOUT;

				if (fds->revents != 0)
					n++;
			}
		}
	}
	FILEDESC_SUNLOCK(fdp);
	td->td_retval[0] = n;
	return (0);
}

/*
 * OpenBSD poll system call.
 *
 * XXX this isn't quite a true representation..  OpenBSD uses select ops.
 */
#ifndef _SYS_SYSPROTO_H_
struct openbsd_poll_args {
	struct pollfd *fds;
	u_int	nfds;
	int	timeout;
};
#endif
int
sys_openbsd_poll(struct thread *td, struct openbsd_poll_args *uap)
{

	return (sys_poll(td, (struct poll_args *)uap));
}

/*
 * XXX This was created specifically to support netncp and netsmb.  This
 * allows the caller to specify a socket to wait for events on.  It returns
 * 0 if any events matched and an error otherwise.  There is no way to
 * determine which events fired.
 */
int
selsocket(struct socket *so, int events, struct timeval *tvp, struct thread *td)
{
	struct timeval rtv;
	sbintime_t asbt, precision, rsbt;
	int error;

	precision = 0;	/* stupid gcc! */
	if (tvp != NULL) {
		rtv = *tvp;
		if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
		    rtv.tv_usec >= 1000000)
			return (EINVAL);
		if (!timevalisset(&rtv))
			asbt = 0;
		else if (rtv.tv_sec <= INT32_MAX) {
			rsbt = tvtosbt(rtv);
			precision = rsbt;
			precision >>= tc_precexp;
			if (TIMESEL(&asbt, rsbt))
				asbt += tc_tick_sbt;
			if (asbt <= INT64_MAX - rsbt)
				asbt += rsbt;
			else
				asbt = -1;
		} else
			asbt = -1;
	} else
		asbt = -1;
	seltdinit(td);
	/*
	 * Iterate until the timeout expires or the socket becomes ready.
	 */
	for (;;) {
		selfdalloc(td, NULL);
		error = sopoll(so, events, NULL, td);
		/* error here is actually the ready events. */
		if (error)
			return (0);
		error = seltdwait(td, asbt, precision);
		if (error)
			break;
	}
	seltdclear(td);
	/* XXX Duplicates ncp/smb behavior. */
	if (error == ERESTART)
		error = 0;
	return (error);
}
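
/*
 * Usage sketch (illustrative only, loosely modelled on the netncp and
 * netsmb callers mentioned above): a kernel consumer waits for a
 * socket to become readable with a bounded timeout:
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *	error = selsocket(so, POLLIN, &tv, curthread);
 *	if (error == EWOULDBLOCK)
 *		... timed out with no data ...
 *
 * Note the inverted convention relative to poll(2): 0 means some event
 * matched, while an errno value (EWOULDBLOCK on timeout) means none
 * did.
 */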

/*
 * Preallocate two selfds associated with 'cookie'.  Some fo_poll routines
 * have two select sets, one for read and another for write.
 */
static void
selfdalloc(struct thread *td, void *cookie)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp->st_free1 == NULL)
		stp->st_free1 = uma_zalloc(selfd_zone, M_WAITOK|M_ZERO);
	stp->st_free1->sf_td = stp;
	stp->st_free1->sf_cookie = cookie;
	if (stp->st_free2 == NULL)
		stp->st_free2 = uma_zalloc(selfd_zone, M_WAITOK|M_ZERO);
	stp->st_free2->sf_td = stp;
	stp->st_free2->sf_cookie = cookie;
}

static void
selfdfree(struct seltd *stp, struct selfd *sfp)
{
	STAILQ_REMOVE(&stp->st_selq, sfp, selfd, sf_link);
	mtx_lock(sfp->sf_mtx);
	if (sfp->sf_si)
		TAILQ_REMOVE(&sfp->sf_si->si_tdlist, sfp, sf_threads);
	mtx_unlock(sfp->sf_mtx);
	uma_zfree(selfd_zone, sfp);
}

/* Drain the waiters tied to all the selfd belonging to the specified selinfo. */
void
seldrain(struct selinfo *sip)
{

	/*
	 * doselwakeup() already provides this behaviour, so simply
	 * defer to it.  Eventually the caller context should take care
	 * to avoid races between a thread calling select()/poll() and
	 * file descriptor detach, but, again, those races are just the
	 * same ones selwakeup() is exposed to.
	 */
	doselwakeup(sip, -1);
}

/*
 * Record a select request.
 */
void
selrecord(struct thread *selector, struct selinfo *sip)
{
	struct selfd *sfp;
	struct seltd *stp;
	struct mtx *mtxp;

	stp = selector->td_sel;
	/*
	 * Don't record when doing a rescan.
	 */
	if (stp->st_flags & SELTD_RESCAN)
		return;
	/*
	 * Grab one of the preallocated descriptors.
	 */
	sfp = NULL;
	if ((sfp = stp->st_free1) != NULL)
		stp->st_free1 = NULL;
	else if ((sfp = stp->st_free2) != NULL)
		stp->st_free2 = NULL;
	else
		panic("selrecord: No free selfd on selq");
	mtxp = sip->si_mtx;
	if (mtxp == NULL)
		mtxp = mtx_pool_find(mtxpool_select, sip);
	/*
	 * Initialize the sfp and queue it in the thread.
	 */
	sfp->sf_si = sip;
	sfp->sf_mtx = mtxp;
	STAILQ_INSERT_TAIL(&stp->st_selq, sfp, sf_link);
	/*
	 * Now that we've locked the sip, check for initialization.
	 */
	mtx_lock(mtxp);
	if (sip->si_mtx == NULL) {
		sip->si_mtx = mtxp;
		TAILQ_INIT(&sip->si_tdlist);
	}
	/*
	 * Add this thread to the list of selfds listening on this selinfo.
	 */
	TAILQ_INSERT_TAIL(&sip->si_tdlist, sfp, sf_threads);
	mtx_unlock(sip->si_mtx);
}
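
/*
 * Usage sketch (illustrative only; all names below are hypothetical):
 * the canonical consumers of selrecord()/selwakeup() are a driver's
 * poll method and its data-arrival path:
 *
 *	static int
 *	foo_poll(struct cdev *dev, int events, struct thread *td)
 *	{
 *		struct foo_softc *sc = dev->si_drv1;
 *		int revents = 0;
 *
 *		mtx_lock(&sc->sc_mtx);
 *		if (sc->sc_ready)
 *			revents = events & (POLLIN | POLLRDNORM);
 *		else
 *			selrecord(td, &sc->sc_rsel);
 *		mtx_unlock(&sc->sc_mtx);
 *		return (revents);
 *	}
 *
 * When data arrives, the driver calls selwakeup(&sc->sc_rsel), which
 * lands in doselwakeup() below and posts SELTD_PENDING to every thread
 * recorded on the selinfo.
 */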

/* Wake up a selecting thread. */
void
selwakeup(struct selinfo *sip)
{

	doselwakeup(sip, -1);
}

/* Wake up a selecting thread, and set its priority. */
void
selwakeuppri(struct selinfo *sip, int pri)
{

	doselwakeup(sip, pri);
}

/*
 * Do a wakeup when a selectable event occurs.
 */
static void
doselwakeup(struct selinfo *sip, int pri)
{
	struct selfd *sfp;
	struct selfd *sfn;
	struct seltd *stp;

	/* If it's not initialized there can't be any waiters. */
	if (sip->si_mtx == NULL)
		return;
	/*
	 * Locking the selinfo locks all selfds associated with it.
	 */
	mtx_lock(sip->si_mtx);
	TAILQ_FOREACH_SAFE(sfp, &sip->si_tdlist, sf_threads, sfn) {
		/*
		 * Once we remove this sfp from the list and clear
		 * sf_si, seltdclear() will know to ignore this si.
		 */
		TAILQ_REMOVE(&sip->si_tdlist, sfp, sf_threads);
		sfp->sf_si = NULL;
		stp = sfp->sf_td;
		mtx_lock(&stp->st_mtx);
		stp->st_flags |= SELTD_PENDING;
		cv_broadcastpri(&stp->st_wait, pri);
		mtx_unlock(&stp->st_mtx);
	}
	mtx_unlock(sip->si_mtx);
}

static void
seltdinit(struct thread *td)
{
	struct seltd *stp;

	if ((stp = td->td_sel) != NULL)
		goto out;
	td->td_sel = stp = malloc(sizeof(*stp), M_SELECT, M_WAITOK|M_ZERO);
	mtx_init(&stp->st_mtx, "sellck", NULL, MTX_DEF);
	cv_init(&stp->st_wait, "select");
out:
	stp->st_flags = 0;
	STAILQ_INIT(&stp->st_selq);
}

static int
seltdwait(struct thread *td, sbintime_t sbt, sbintime_t precision)
{
	struct seltd *stp;
	int error;

	stp = td->td_sel;
	/*
	 * An event of interest may occur while we do not hold the seltd
	 * locked so check the pending flag before we sleep.
	 */
	mtx_lock(&stp->st_mtx);
	/*
	 * Any further calls to selrecord will be a rescan.
	 */
	stp->st_flags |= SELTD_RESCAN;
	if (stp->st_flags & SELTD_PENDING) {
		mtx_unlock(&stp->st_mtx);
		return (0);
	}
	if (sbt == 0)
		error = EWOULDBLOCK;
	else if (sbt != -1)
		error = cv_timedwait_sig_sbt(&stp->st_wait, &stp->st_mtx,
		    sbt, precision, C_ABSOLUTE);
	else
		error = cv_wait_sig(&stp->st_wait, &stp->st_mtx);
	mtx_unlock(&stp->st_mtx);

	return (error);
}

void
seltdfini(struct thread *td)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp == NULL)
		return;
	if (stp->st_free1)
		uma_zfree(selfd_zone, stp->st_free1);
	if (stp->st_free2)
		uma_zfree(selfd_zone, stp->st_free2);
	td->td_sel = NULL;
	free(stp, M_SELECT);
}

/*
 * Remove the references to the thread from all of the objects we were
 * polling.
 */
static void
seltdclear(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;

	stp = td->td_sel;
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn)
		selfdfree(stp, sfp);
	stp->st_flags = 0;
}

static void selectinit(void *);
SYSINIT(select, SI_SUB_SYSCALLS, SI_ORDER_ANY, selectinit, NULL);
static void
selectinit(void *dummy __unused)
{

	selfd_zone = uma_zcreate("selfd", sizeof(struct selfd), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtxpool_select = mtx_pool_create("select mtxpool", 128, MTX_DEF);
}