FreeBSD/Linux Kernel Cross Reference
sys/kern/sys_pipe.c
1 /* $NetBSD: sys_pipe.c,v 1.103.4.5 2009/04/04 23:36:27 snj Exp $ */
2
3 /*-
4 * Copyright (c) 2003, 2007, 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg, and by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1996 John S. Dyson
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice immediately at the beginning of the file, without modification,
41 * this list of conditions, and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. Absolutely no warranty of function or purpose is made by the author
46 * John S. Dyson.
47 * 4. Modifications may be freely made to this file if the above conditions
48 * are met.
49 */
50
51 /*
52 * This file contains a high-performance replacement for the socket-based
53 * pipes scheme originally used. It does not support all features of
54 * sockets, but does do everything that pipes normally do.
55 *
56 * This code has two modes of operation, a small write mode and a large
57 * write mode. The small write mode acts like conventional pipes with
58 * a kernel buffer. If the buffer is less than PIPE_MINDIRECT, then the
59 * "normal" pipe buffering is done. If the buffer is between PIPE_MINDIRECT
    60  * and PIPE_SIZE in size, it is mapped read-only into the kernel address space
    61  * using the UVM page loan facility, from where the receiving process can copy
    62  * the data directly out of the sending process's pages.
63 *
64 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
65 * happen for small transfers so that the system will not spend all of
66 * its time context switching. PIPE_SIZE is constrained by the
67 * amount of kernel virtual memory.
68 */
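/*
 * For illustration only: from userland both modes are reached through the
 * ordinary pipe(2) interface that this file implements (error handling
 * omitted):
 *
 *	int fds[2];
 *	char buf[512];
 *
 *	pipe(fds);
 *	write(fds[1], buf, sizeof(buf));	calls pipe_write() below
 *	read(fds[0], buf, sizeof(buf));		calls pipe_read() below
 */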
69
70 #include <sys/cdefs.h>
71 __KERNEL_RCSID(0, "$NetBSD: sys_pipe.c,v 1.103.4.5 2009/04/04 23:36:27 snj Exp $");
72
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/proc.h>
76 #include <sys/fcntl.h>
77 #include <sys/file.h>
78 #include <sys/filedesc.h>
79 #include <sys/filio.h>
80 #include <sys/kernel.h>
81 #include <sys/ttycom.h>
82 #include <sys/stat.h>
83 #include <sys/poll.h>
84 #include <sys/signalvar.h>
85 #include <sys/vnode.h>
86 #include <sys/uio.h>
87 #include <sys/select.h>
88 #include <sys/mount.h>
89 #include <sys/syscallargs.h>
90 #include <sys/sysctl.h>
91 #include <sys/kauth.h>
92 #include <sys/atomic.h>
93 #include <sys/pipe.h>
94
95 #include <uvm/uvm.h>
96
97 /* Use this define if you want to disable *fancy* VM things. */
98 /* XXX Disabled for now; rare hangs switching between direct/buffered */
99 #define PIPE_NODIRECT
100
101 /*
102 * interfaces to the outside world
103 */
104 static int pipe_read(struct file *fp, off_t *offset, struct uio *uio,
105 kauth_cred_t cred, int flags);
106 static int pipe_write(struct file *fp, off_t *offset, struct uio *uio,
107 kauth_cred_t cred, int flags);
108 static int pipe_close(struct file *fp);
109 static int pipe_poll(struct file *fp, int events);
110 static int pipe_kqfilter(struct file *fp, struct knote *kn);
111 static int pipe_stat(struct file *fp, struct stat *sb);
112 static int pipe_ioctl(struct file *fp, u_long cmd, void *data);
113
114 static const struct fileops pipeops = {
115 .fo_read = pipe_read,
116 .fo_write = pipe_write,
117 .fo_ioctl = pipe_ioctl,
118 .fo_fcntl = fnullop_fcntl,
119 .fo_poll = pipe_poll,
120 .fo_stat = pipe_stat,
121 .fo_close = pipe_close,
122 .fo_kqfilter = pipe_kqfilter,
123 .fo_drain = fnullop_drain,
124 };
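/*
 * This table is what the descriptor layer dispatches through: read(2),
 * write(2), ioctl(2), poll(2), fstat(2), close(2) and kevent(2) on a
 * pipe descriptor end up in the pipe_*() routines defined below.
 */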
125
126 /*
   127  * Default pipe buffer size(s); this can be kind of large now because pipe
128 * space is pageable. The pipe code will try to maintain locality of
129 * reference for performance reasons, so small amounts of outstanding I/O
130 * will not wipe the cache.
131 */
132 #define MINPIPESIZE (PIPE_SIZE/3)
133 #define MAXPIPESIZE (2*PIPE_SIZE/3)
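/*
 * Worked example, assuming the usual 16384-byte PIPE_SIZE: MINPIPESIZE
 * is 5461 and MAXPIPESIZE 10922.  pipe_read() below uses MINPIPESIZE to
 * decide when to wake a waiting writer after draining the buffer.
 */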
134
135 /*
   136  * Maximum amount of kva for pipes -- this is kind of a soft limit, but
137 * is there so that on large systems, we don't exhaust it.
138 */
139 #define MAXPIPEKVA (8*1024*1024)
140 static u_int maxpipekva = MAXPIPEKVA;
141
142 /*
   143  * Limit for direct transfers; we cannot, of course, limit
   144  * the amount of kva for pipes in general.
145 */
146 #define LIMITPIPEKVA (16*1024*1024)
147 static u_int limitpipekva = LIMITPIPEKVA;
148
149 /*
150 * Limit the number of "big" pipes
151 */
152 #define LIMITBIGPIPES 32
153 static u_int maxbigpipes = LIMITBIGPIPES;
154 static u_int nbigpipe = 0;
155
156 /*
157 * Amount of KVA consumed by pipe buffers.
158 */
159 static u_int amountpipekva = 0;
160
161 static void pipeclose(struct file *fp, struct pipe *pipe);
162 static void pipe_free_kmem(struct pipe *pipe);
163 static int pipe_create(struct pipe **pipep, pool_cache_t, kmutex_t *);
164 static int pipelock(struct pipe *pipe, int catch);
165 static inline void pipeunlock(struct pipe *pipe);
166 static void pipeselwakeup(struct pipe *pipe, struct pipe *sigp, int code);
167 #ifndef PIPE_NODIRECT
168 static int pipe_direct_write(struct file *fp, struct pipe *wpipe,
169 struct uio *uio);
170 #endif
171 static int pipespace(struct pipe *pipe, int size);
172 static int pipe_ctor(void *, void *, int);
173 static void pipe_dtor(void *, void *);
174
175 #ifndef PIPE_NODIRECT
176 static int pipe_loan_alloc(struct pipe *, int);
177 static void pipe_loan_free(struct pipe *);
178 #endif /* PIPE_NODIRECT */
179
180 static pool_cache_t pipe_wr_cache;
181 static pool_cache_t pipe_rd_cache;
182
183 void
184 pipe_init(void)
185 {
186
187 /* Writer side is not automatically allocated KVA. */
188 pipe_wr_cache = pool_cache_init(sizeof(struct pipe), 0, 0, 0, "pipewr",
189 NULL, IPL_NONE, pipe_ctor, pipe_dtor, NULL);
190 KASSERT(pipe_wr_cache != NULL);
191
192 /* Reader side gets preallocated KVA. */
193 pipe_rd_cache = pool_cache_init(sizeof(struct pipe), 0, 0, 0, "piperd",
194 NULL, IPL_NONE, pipe_ctor, pipe_dtor, (void *)1);
195 KASSERT(pipe_rd_cache != NULL);
196 }
197
198 static int
199 pipe_ctor(void *arg, void *obj, int flags)
200 {
201 struct pipe *pipe;
202 vaddr_t va;
203
204 pipe = obj;
205
206 memset(pipe, 0, sizeof(struct pipe));
207 if (arg != NULL) {
208 /* Preallocate space. */
209 va = uvm_km_alloc(kernel_map, PIPE_SIZE, 0,
210 UVM_KMF_PAGEABLE | UVM_KMF_WAITVA);
211 KASSERT(va != 0);
212 pipe->pipe_kmem = va;
213 atomic_add_int(&amountpipekva, PIPE_SIZE);
214 }
215 cv_init(&pipe->pipe_rcv, "piperd");
216 cv_init(&pipe->pipe_wcv, "pipewr");
217 cv_init(&pipe->pipe_draincv, "pipedrain");
218 cv_init(&pipe->pipe_lkcv, "pipelk");
219 selinit(&pipe->pipe_sel);
220 pipe->pipe_state = PIPE_SIGNALR;
221
222 return 0;
223 }
224
225 static void
226 pipe_dtor(void *arg, void *obj)
227 {
228 struct pipe *pipe;
229
230 pipe = obj;
231
232 cv_destroy(&pipe->pipe_rcv);
233 cv_destroy(&pipe->pipe_wcv);
234 cv_destroy(&pipe->pipe_draincv);
235 cv_destroy(&pipe->pipe_lkcv);
236 seldestroy(&pipe->pipe_sel);
237 if (pipe->pipe_kmem != 0) {
238 uvm_km_free(kernel_map, pipe->pipe_kmem, PIPE_SIZE,
239 UVM_KMF_PAGEABLE);
240 atomic_add_int(&amountpipekva, -PIPE_SIZE);
241 }
242 }
243
244 /*
245 * The pipe system call for the DTYPE_PIPE type of pipes
246 */
247
248 /* ARGSUSED */
249 int
250 sys_pipe(struct lwp *l, const void *v, register_t *retval)
251 {
252 struct file *rf, *wf;
253 struct pipe *rpipe, *wpipe;
254 kmutex_t *mutex;
255 int fd, error;
256 proc_t *p;
257
258 p = curproc;
259 rpipe = wpipe = NULL;
260 mutex = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
261 if (mutex == NULL)
262 return (ENOMEM);
263 mutex_obj_hold(mutex);
264 if (pipe_create(&rpipe, pipe_rd_cache, mutex) ||
265 pipe_create(&wpipe, pipe_wr_cache, mutex)) {
266 pipeclose(NULL, rpipe);
267 pipeclose(NULL, wpipe);
268 return (ENFILE);
269 }
270
271 error = fd_allocfile(&rf, &fd);
272 if (error)
273 goto free2;
274 retval[0] = fd;
275 rf->f_flag = FREAD;
276 rf->f_type = DTYPE_PIPE;
277 rf->f_data = (void *)rpipe;
278 rf->f_ops = &pipeops;
279
280 error = fd_allocfile(&wf, &fd);
281 if (error)
282 goto free3;
283 retval[1] = fd;
284 wf->f_flag = FWRITE;
285 wf->f_type = DTYPE_PIPE;
286 wf->f_data = (void *)wpipe;
287 wf->f_ops = &pipeops;
288
289 rpipe->pipe_peer = wpipe;
290 wpipe->pipe_peer = rpipe;
291
292 fd_affix(p, rf, (int)retval[0]);
293 fd_affix(p, wf, (int)retval[1]);
294 return (0);
295 free3:
296 fd_abort(p, rf, (int)retval[0]);
297 free2:
298 pipeclose(NULL, wpipe);
299 pipeclose(NULL, rpipe);
300
301 return (error);
302 }
303
304 /*
   305  * Allocate kva for the pipe circular buffer; the space is pageable.
   306  * This routine will 'realloc' the size of a pipe safely: if it fails,
   307  * it will retain the old buffer and return ENOMEM.
309 */
310 static int
311 pipespace(struct pipe *pipe, int size)
312 {
313 void *buffer;
314
315 /*
316 * Allocate pageable virtual address space. Physical memory is
317 * allocated on demand.
318 */
319 if (size == PIPE_SIZE && pipe->pipe_kmem != 0) {
320 buffer = (void *)pipe->pipe_kmem;
321 } else {
322 buffer = (void *)uvm_km_alloc(kernel_map, round_page(size),
323 0, UVM_KMF_PAGEABLE);
324 if (buffer == NULL)
325 return (ENOMEM);
326 atomic_add_int(&amountpipekva, size);
327 }
328
329 /* free old resources if we're resizing */
330 pipe_free_kmem(pipe);
331 pipe->pipe_buffer.buffer = buffer;
332 pipe->pipe_buffer.size = size;
333 pipe->pipe_buffer.in = 0;
334 pipe->pipe_buffer.out = 0;
335 pipe->pipe_buffer.cnt = 0;
336 return (0);
337 }
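/*
 * The pipebuf fields form a simple ring: 'in' is the offset of the next
 * byte to be written, 'out' the offset of the next byte to be read, and
 * 'cnt' the number of bytes currently queued (0 <= cnt <= size).
 * pipe_read() and pipe_write() wrap both indices back to 0 when they
 * reach the end of the buffer.
 */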
338
339 /*
340 * Initialize and allocate VM and memory for pipe.
341 */
342 static int
343 pipe_create(struct pipe **pipep, pool_cache_t cache, kmutex_t *mutex)
344 {
345 struct pipe *pipe;
346 int error;
347
348 pipe = pool_cache_get(cache, PR_WAITOK);
349 KASSERT(pipe != NULL);
350 *pipep = pipe;
351 error = 0;
352 getmicrotime(&pipe->pipe_ctime);
353 pipe->pipe_atime = pipe->pipe_ctime;
354 pipe->pipe_mtime = pipe->pipe_ctime;
355 pipe->pipe_lock = mutex;
356 if (cache == pipe_rd_cache) {
357 error = pipespace(pipe, PIPE_SIZE);
358 } else {
359 pipe->pipe_buffer.buffer = NULL;
360 pipe->pipe_buffer.size = 0;
361 pipe->pipe_buffer.in = 0;
362 pipe->pipe_buffer.out = 0;
363 pipe->pipe_buffer.cnt = 0;
364 }
365 return error;
366 }
367
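/*
 * Sketch of the locking scheme used below: the kmutex_t shared by both
 * ends covers short manipulations of pipe state, while PIPE_LOCKFL is a
 * long-term I/O lock held across uiomove(), which may sleep.  pipelock()
 * and pipeunlock() manage the long-term lock while the mutex is held.
 */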
368 /*
369 * Lock a pipe for I/O, blocking other access
   370  * Called with the pipe mutex held.
   371  * Returns with the mutex still held, whether or not the long-term lock was taken.
372 */
373 static int
374 pipelock(struct pipe *pipe, int catch)
375 {
376 int error;
377
378 KASSERT(mutex_owned(pipe->pipe_lock));
379
380 while (pipe->pipe_state & PIPE_LOCKFL) {
381 pipe->pipe_state |= PIPE_LWANT;
382 if (catch) {
383 error = cv_wait_sig(&pipe->pipe_lkcv, pipe->pipe_lock);
384 if (error != 0)
385 return error;
386 } else
387 cv_wait(&pipe->pipe_lkcv, pipe->pipe_lock);
388 }
389
390 pipe->pipe_state |= PIPE_LOCKFL;
391
392 return 0;
393 }
394
395 /*
396 * unlock a pipe I/O lock
397 */
398 static inline void
399 pipeunlock(struct pipe *pipe)
400 {
401
402 KASSERT(pipe->pipe_state & PIPE_LOCKFL);
403
404 pipe->pipe_state &= ~PIPE_LOCKFL;
405 if (pipe->pipe_state & PIPE_LWANT) {
406 pipe->pipe_state &= ~PIPE_LWANT;
407 cv_broadcast(&pipe->pipe_lkcv);
408 }
409 }
410
411 /*
   412  * Select/poll wakeup. This also sends SIGIO to the peer connected to
   413  * the 'sigpipe' side of the pipe.
414 */
415 static void
416 pipeselwakeup(struct pipe *selp, struct pipe *sigp, int code)
417 {
418 int band;
419
420 switch (code) {
421 case POLL_IN:
422 band = POLLIN|POLLRDNORM;
423 break;
424 case POLL_OUT:
425 band = POLLOUT|POLLWRNORM;
426 break;
427 case POLL_HUP:
428 band = POLLHUP;
429 break;
430 #if POLL_HUP != POLL_ERR
431 case POLL_ERR:
432 band = POLLERR;
433 break;
434 #endif
435 default:
436 band = 0;
437 #ifdef DIAGNOSTIC
438 printf("bad siginfo code %d in pipe notification.\n", code);
439 #endif
440 break;
441 }
442
443 selnotify(&selp->pipe_sel, band, NOTE_SUBMIT);
444
445 if (sigp == NULL || (sigp->pipe_state & PIPE_ASYNC) == 0)
446 return;
447
448 fownsignal(sigp->pipe_pgid, SIGIO, code, band, selp);
449 }
450
451 /* ARGSUSED */
452 static int
453 pipe_read(struct file *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
454 int flags)
455 {
456 struct pipe *rpipe = (struct pipe *) fp->f_data;
457 struct pipebuf *bp = &rpipe->pipe_buffer;
458 kmutex_t *lock = rpipe->pipe_lock;
459 int error;
460 size_t nread = 0;
461 size_t size;
462 size_t ocnt;
463
464 mutex_enter(lock);
465 ++rpipe->pipe_busy;
466 ocnt = bp->cnt;
467
468 again:
469 error = pipelock(rpipe, 1);
470 if (error)
471 goto unlocked_error;
472
473 while (uio->uio_resid) {
474 /*
475 * normal pipe buffer receive
476 */
477 if (bp->cnt > 0) {
478 size = bp->size - bp->out;
479 if (size > bp->cnt)
480 size = bp->cnt;
481 if (size > uio->uio_resid)
482 size = uio->uio_resid;
483
484 mutex_exit(lock);
485 error = uiomove((char *)bp->buffer + bp->out, size, uio);
486 mutex_enter(lock);
487 if (error)
488 break;
489
490 bp->out += size;
491 if (bp->out >= bp->size)
492 bp->out = 0;
493
494 bp->cnt -= size;
495
496 /*
497 * If there is no more to read in the pipe, reset
498 * its pointers to the beginning. This improves
499 * cache hit stats.
500 */
501 if (bp->cnt == 0) {
502 bp->in = 0;
503 bp->out = 0;
504 }
505 nread += size;
506 continue;
507 }
508
509 #ifndef PIPE_NODIRECT
510 if ((rpipe->pipe_state & PIPE_DIRECTR) != 0) {
511 /*
512 * Direct copy, bypassing a kernel buffer.
513 */
514 void * va;
515
516 KASSERT(rpipe->pipe_state & PIPE_DIRECTW);
517
518 size = rpipe->pipe_map.cnt;
519 if (size > uio->uio_resid)
520 size = uio->uio_resid;
521
522 va = (char *)rpipe->pipe_map.kva + rpipe->pipe_map.pos;
523 mutex_exit(lock);
524 error = uiomove(va, size, uio);
525 mutex_enter(lock);
526 if (error)
527 break;
528 nread += size;
529 rpipe->pipe_map.pos += size;
530 rpipe->pipe_map.cnt -= size;
531 if (rpipe->pipe_map.cnt == 0) {
532 rpipe->pipe_state &= ~PIPE_DIRECTR;
533 cv_broadcast(&rpipe->pipe_wcv);
534 }
535 continue;
536 }
537 #endif
538 /*
539 * Break if some data was read.
540 */
541 if (nread > 0)
542 break;
543
544 /*
545 * detect EOF condition
546 * read returns 0 on EOF, no need to set error
547 */
548 if (rpipe->pipe_state & PIPE_EOF)
549 break;
550
551 /*
552 * don't block on non-blocking I/O
553 */
554 if (fp->f_flag & FNONBLOCK) {
555 error = EAGAIN;
556 break;
557 }
558
559 /*
560 * Unlock the pipe buffer for our remaining processing.
561 * We will either break out with an error or we will
562 * sleep and relock to loop.
563 */
564 pipeunlock(rpipe);
565
566 /*
567 * Re-check to see if more direct writes are pending.
568 */
569 if ((rpipe->pipe_state & PIPE_DIRECTR) != 0)
570 goto again;
571
572 /*
573 * We want to read more, wake up select/poll.
574 */
575 pipeselwakeup(rpipe, rpipe->pipe_peer, POLL_OUT);
576
577 /*
578 * If the "write-side" is blocked, wake it up now.
579 */
580 cv_broadcast(&rpipe->pipe_wcv);
581
582 /* Now wait until the pipe is filled */
583 error = cv_wait_sig(&rpipe->pipe_rcv, lock);
584 if (error != 0)
585 goto unlocked_error;
586 goto again;
587 }
588
589 if (error == 0)
590 getmicrotime(&rpipe->pipe_atime);
591 pipeunlock(rpipe);
592
593 unlocked_error:
594 --rpipe->pipe_busy;
595 if (rpipe->pipe_busy == 0) {
596 cv_broadcast(&rpipe->pipe_draincv);
597 }
598 if (bp->cnt < MINPIPESIZE) {
599 cv_broadcast(&rpipe->pipe_wcv);
600 }
601
602 /*
   603  * If anything was read off the buffer, signal the writer that it is
   604  * possible to write more data. Also send a signal if this is the
   605  * first read since the last write.
606 */
607 if ((bp->size - bp->cnt) >= PIPE_BUF
608 && (ocnt != bp->cnt || (rpipe->pipe_state & PIPE_SIGNALR))) {
609 pipeselwakeup(rpipe, rpipe->pipe_peer, POLL_OUT);
610 rpipe->pipe_state &= ~PIPE_SIGNALR;
611 }
612
613 mutex_exit(lock);
614 return (error);
615 }
616
617 #ifndef PIPE_NODIRECT
618 /*
619 * Allocate structure for loan transfer.
620 */
621 static int
622 pipe_loan_alloc(struct pipe *wpipe, int npages)
623 {
624 vsize_t len;
625
626 len = (vsize_t)npages << PAGE_SHIFT;
627 atomic_add_int(&amountpipekva, len);
628 wpipe->pipe_map.kva = uvm_km_alloc(kernel_map, len, 0,
629 UVM_KMF_VAONLY | UVM_KMF_WAITVA);
630 if (wpipe->pipe_map.kva == 0) {
631 atomic_add_int(&amountpipekva, -len);
632 return (ENOMEM);
633 }
634
635 wpipe->pipe_map.npages = npages;
636 wpipe->pipe_map.pgs = kmem_alloc(npages * sizeof(struct vm_page *),
637 KM_SLEEP);
638 return (0);
639 }
640
641 /*
642 * Free resources allocated for loan transfer.
643 */
644 static void
645 pipe_loan_free(struct pipe *wpipe)
646 {
647 vsize_t len;
648
649 len = (vsize_t)wpipe->pipe_map.npages << PAGE_SHIFT;
650 uvm_km_free(kernel_map, wpipe->pipe_map.kva, len, UVM_KMF_VAONLY);
651 wpipe->pipe_map.kva = 0;
652 atomic_add_int(&amountpipekva, -len);
653 kmem_free(wpipe->pipe_map.pgs,
654 wpipe->pipe_map.npages * sizeof(struct vm_page *));
655 wpipe->pipe_map.pgs = NULL;
656 }
657
658 /*
   659  * NetBSD direct write, using the uvm_loan() mechanism.
660 * This implements the pipe buffer write mechanism. Note that only
661 * a direct write OR a normal pipe write can be pending at any given time.
662 * If there are any characters in the pipe buffer, the direct write will
663 * be deferred until the receiving process grabs all of the bytes from
   664  * the pipe buffer. Then the direct mapping write is set up.
665 *
666 * Called with the long-term pipe lock held.
667 */
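/*
 * A rough sketch of the handshake, as implemented below (flags live in
 * pipe_state):
 *
 *	writer (pipe_direct_write)	reader (pipe_read)
 *	--------------------------	------------------
 *	loan pages, map them at
 *	pipe_map.kva, set PIPE_DIRECTW
 *	wait for pipe_buffer to drain
 *	set PIPE_DIRECTR	----->	copies from pipe_map.kva with
 *					uiomove(), decrementing
 *					pipe_map.cnt
 *	wait for PIPE_DIRECTR	<-----	clears PIPE_DIRECTR once
 *	to clear			pipe_map.cnt reaches 0
 *	unmap and unloan the pages
 */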
668 static int
669 pipe_direct_write(struct file *fp, struct pipe *wpipe, struct uio *uio)
670 {
671 int error, npages, j;
672 struct vm_page **pgs;
673 vaddr_t bbase, kva, base, bend;
674 vsize_t blen, bcnt;
675 voff_t bpos;
676 kmutex_t *lock = wpipe->pipe_lock;
677
678 KASSERT(mutex_owned(wpipe->pipe_lock));
679 KASSERT(wpipe->pipe_map.cnt == 0);
680
681 mutex_exit(lock);
682
683 /*
684 * Handle first PIPE_CHUNK_SIZE bytes of buffer. Deal with buffers
685 * not aligned to PAGE_SIZE.
686 */
687 bbase = (vaddr_t)uio->uio_iov->iov_base;
688 base = trunc_page(bbase);
689 bend = round_page(bbase + uio->uio_iov->iov_len);
690 blen = bend - base;
691 bpos = bbase - base;
692
693 if (blen > PIPE_DIRECT_CHUNK) {
694 blen = PIPE_DIRECT_CHUNK;
695 bend = base + blen;
696 bcnt = PIPE_DIRECT_CHUNK - bpos;
697 } else {
698 bcnt = uio->uio_iov->iov_len;
699 }
700 npages = blen >> PAGE_SHIFT;
701
702 /*
703 * Free the old kva if we need more pages than we have
704 * allocated.
705 */
706 if (wpipe->pipe_map.kva != 0 && npages > wpipe->pipe_map.npages)
707 pipe_loan_free(wpipe);
708
709 /* Allocate new kva. */
710 if (wpipe->pipe_map.kva == 0) {
711 error = pipe_loan_alloc(wpipe, npages);
712 if (error) {
713 mutex_enter(lock);
714 return (error);
715 }
716 }
717
718 /* Loan the write buffer memory from writer process */
719 pgs = wpipe->pipe_map.pgs;
720 error = uvm_loan(&uio->uio_vmspace->vm_map, base, blen,
721 pgs, UVM_LOAN_TOPAGE);
722 if (error) {
723 pipe_loan_free(wpipe);
724 mutex_enter(lock);
   725  		return (ENOMEM); /* so that the caller falls back to ordinary write */
726 }
727
728 /* Enter the loaned pages to kva */
729 kva = wpipe->pipe_map.kva;
730 for (j = 0; j < npages; j++, kva += PAGE_SIZE) {
731 pmap_kenter_pa(kva, VM_PAGE_TO_PHYS(pgs[j]), VM_PROT_READ);
732 }
733 pmap_update(pmap_kernel());
734
735 /* Now we can put the pipe in direct write mode */
736 wpipe->pipe_map.pos = bpos;
737 wpipe->pipe_map.cnt = bcnt;
738
739 /*
740 * But before we can let someone do a direct read, we
741 * have to wait until the pipe is drained. Release the
742 * pipe lock while we wait.
743 */
744 mutex_enter(lock);
745 wpipe->pipe_state |= PIPE_DIRECTW;
746 pipeunlock(wpipe);
747
748 while (error == 0 && wpipe->pipe_buffer.cnt > 0) {
749 cv_broadcast(&wpipe->pipe_rcv);
750 error = cv_wait_sig(&wpipe->pipe_wcv, lock);
751 if (error == 0 && wpipe->pipe_state & PIPE_EOF)
752 error = EPIPE;
753 }
754
   755  	/* Pipe is drained; the next read will come off the direct buffer */
756 wpipe->pipe_state |= PIPE_DIRECTR;
757
758 /* Wait until the reader is done */
759 while (error == 0 && (wpipe->pipe_state & PIPE_DIRECTR)) {
760 cv_broadcast(&wpipe->pipe_rcv);
761 pipeselwakeup(wpipe, wpipe, POLL_IN);
762 error = cv_wait_sig(&wpipe->pipe_wcv, lock);
763 if (error == 0 && wpipe->pipe_state & PIPE_EOF)
764 error = EPIPE;
765 }
766
767 /* Take pipe out of direct write mode */
768 wpipe->pipe_state &= ~(PIPE_DIRECTW | PIPE_DIRECTR);
769
770 /* Acquire the pipe lock and cleanup */
771 (void)pipelock(wpipe, 0);
772 mutex_exit(lock);
773
774 if (pgs != NULL) {
775 pmap_kremove(wpipe->pipe_map.kva, blen);
776 pmap_update(pmap_kernel());
777 uvm_unloan(pgs, npages, UVM_LOAN_TOPAGE);
778 }
779 if (error || amountpipekva > maxpipekva)
780 pipe_loan_free(wpipe);
781
782 mutex_enter(lock);
783 if (error) {
784 pipeselwakeup(wpipe, wpipe, POLL_ERR);
785
786 /*
787 * If nothing was read from what we offered, return error
788 * straight on. Otherwise update uio resid first. Caller
789 * will deal with the error condition, returning short
790 * write, error, or restarting the write(2) as appropriate.
791 */
792 if (wpipe->pipe_map.cnt == bcnt) {
793 wpipe->pipe_map.cnt = 0;
794 cv_broadcast(&wpipe->pipe_wcv);
795 return (error);
796 }
797
798 bcnt -= wpipe->pipe_map.cnt;
799 }
800
801 uio->uio_resid -= bcnt;
802 /* uio_offset not updated, not set/used for write(2) */
803 uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + bcnt;
804 uio->uio_iov->iov_len -= bcnt;
805 if (uio->uio_iov->iov_len == 0) {
806 uio->uio_iov++;
807 uio->uio_iovcnt--;
808 }
809
810 wpipe->pipe_map.cnt = 0;
811 return (error);
812 }
813 #endif /* !PIPE_NODIRECT */
814
815 static int
816 pipe_write(struct file *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
817 int flags)
818 {
819 struct pipe *wpipe, *rpipe;
820 struct pipebuf *bp;
821 kmutex_t *lock;
822 int error;
823
824 /* We want to write to our peer */
825 rpipe = (struct pipe *) fp->f_data;
826 lock = rpipe->pipe_lock;
827 error = 0;
828
829 mutex_enter(lock);
830 wpipe = rpipe->pipe_peer;
831
832 /*
833 * Detect loss of pipe read side, issue SIGPIPE if lost.
834 */
835 if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) != 0) {
836 mutex_exit(lock);
837 return EPIPE;
838 }
839 ++wpipe->pipe_busy;
840
   841  	/* Acquire the long-term pipe lock */
842 if ((error = pipelock(wpipe, 1)) != 0) {
843 --wpipe->pipe_busy;
844 if (wpipe->pipe_busy == 0) {
845 cv_broadcast(&wpipe->pipe_draincv);
846 }
847 mutex_exit(lock);
848 return (error);
849 }
850
851 bp = &wpipe->pipe_buffer;
852
853 /*
854 * If it is advantageous to resize the pipe buffer, do so.
855 */
856 if ((uio->uio_resid > PIPE_SIZE) &&
857 (nbigpipe < maxbigpipes) &&
858 #ifndef PIPE_NODIRECT
859 (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
860 #endif
861 (bp->size <= PIPE_SIZE) && (bp->cnt == 0)) {
862
863 if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
864 atomic_inc_uint(&nbigpipe);
865 }
866
867 while (uio->uio_resid) {
868 size_t space;
869
870 #ifndef PIPE_NODIRECT
871 /*
   872  		 * Pipe buffered writes cannot proceed concurrently with
873 * direct writes. Also, only one direct write can be
874 * in progress at any one time. We wait until the currently
875 * executing direct write is completed before continuing.
876 *
877 * We break out if a signal occurs or the reader goes away.
878 */
879 while (error == 0 && wpipe->pipe_state & PIPE_DIRECTW) {
880 cv_broadcast(&wpipe->pipe_rcv);
881 pipeunlock(wpipe);
882 error = cv_wait_sig(&wpipe->pipe_wcv, lock);
883 (void)pipelock(wpipe, 0);
884 if (wpipe->pipe_state & PIPE_EOF)
885 error = EPIPE;
886 }
887 if (error)
888 break;
889
890 /*
891 * If the transfer is large, we can gain performance if
892 * we do process-to-process copies directly.
893 * If the write is non-blocking, we don't use the
894 * direct write mechanism.
895 *
896 * The direct write mechanism will detect the reader going
897 * away on us.
898 */
899 if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
900 (fp->f_flag & FNONBLOCK) == 0 &&
901 (wpipe->pipe_map.kva || (amountpipekva < limitpipekva))) {
902 error = pipe_direct_write(fp, wpipe, uio);
903
904 /*
905 * Break out if error occurred, unless it's ENOMEM.
906 * ENOMEM means we failed to allocate some resources
907 * for direct write, so we just fallback to ordinary
908 * write. If the direct write was successful,
909 * process rest of data via ordinary write.
910 */
911 if (error == 0)
912 continue;
913
914 if (error != ENOMEM)
915 break;
916 }
   917  #endif /* !PIPE_NODIRECT */
918
919 space = bp->size - bp->cnt;
920
921 /* Writes of size <= PIPE_BUF must be atomic. */
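		/*
		 * By treating the buffer as full here, the whole request
		 * waits for the reader to drain the pipe instead of being
		 * copied in piecemeal; this is what gives writes of
		 * PIPE_BUF bytes or less their atomicity.
		 */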
922 if ((space < uio->uio_resid) && (uio->uio_resid <= PIPE_BUF))
923 space = 0;
924
925 if (space > 0) {
926 int size; /* Transfer size */
927 int segsize; /* first segment to transfer */
928
929 /*
930 * Transfer size is minimum of uio transfer
931 * and free space in pipe buffer.
932 */
933 if (space > uio->uio_resid)
934 size = uio->uio_resid;
935 else
936 size = space;
937 /*
938 * First segment to transfer is minimum of
939 * transfer size and contiguous space in
940 * pipe buffer. If first segment to transfer
941 * is less than the transfer size, we've got
942 * a wraparound in the buffer.
943 */
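			/*
			 * E.g. with a 16-byte buffer, bp->in == 12 and
			 * size == 10: segsize is 4, those bytes land at
			 * offsets 12..15, the remaining 6 wrap around to
			 * offsets 0..5, and bp->in ends up at 6.
			 */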
944 segsize = bp->size - bp->in;
945 if (segsize > size)
946 segsize = size;
947
948 /* Transfer first segment */
949 mutex_exit(lock);
950 error = uiomove((char *)bp->buffer + bp->in, segsize,
951 uio);
952
953 if (error == 0 && segsize < size) {
954 /*
955 * Transfer remaining part now, to
956 * support atomic writes. Wraparound
957 * happened.
958 */
959 #ifdef DEBUG
960 if (bp->in + segsize != bp->size)
961 panic("Expected pipe buffer wraparound disappeared");
962 #endif
963
964 error = uiomove(bp->buffer,
965 size - segsize, uio);
966 }
967 mutex_enter(lock);
968 if (error)
969 break;
970
971 bp->in += size;
972 if (bp->in >= bp->size) {
973 #ifdef DEBUG
974 if (bp->in != size - segsize + bp->size)
975 panic("Expected wraparound bad");
976 #endif
977 bp->in = size - segsize;
978 }
979
980 bp->cnt += size;
981 #ifdef DEBUG
982 if (bp->cnt > bp->size)
983 panic("Pipe buffer overflow");
984 #endif
985 } else {
986 /*
987 * If the "read-side" has been blocked, wake it up now.
988 */
989 cv_broadcast(&wpipe->pipe_rcv);
990
991 /*
992 * don't block on non-blocking I/O
993 */
994 if (fp->f_flag & FNONBLOCK) {
995 error = EAGAIN;
996 break;
997 }
998
999 /*
1000 * We have no more space and have something to offer,
1001 * wake up select/poll.
1002 */
1003 if (bp->cnt)
1004 pipeselwakeup(wpipe, wpipe, POLL_IN);
1005
1006 pipeunlock(wpipe);
1007 error = cv_wait_sig(&wpipe->pipe_wcv, lock);
1008 (void)pipelock(wpipe, 0);
1009 if (error != 0)
1010 break;
1011 /*
1012 * If read side wants to go away, we just issue a signal
1013 * to ourselves.
1014 */
1015 if (wpipe->pipe_state & PIPE_EOF) {
1016 error = EPIPE;
1017 break;
1018 }
1019 }
1020 }
1021
1022 --wpipe->pipe_busy;
1023 if (wpipe->pipe_busy == 0) {
1024 cv_broadcast(&wpipe->pipe_draincv);
1025 }
1026 if (bp->cnt > 0) {
1027 cv_broadcast(&wpipe->pipe_rcv);
1028 }
1029
1030 /*
1031 * Don't return EPIPE if I/O was successful
1032 */
1033 if (error == EPIPE && bp->cnt == 0 && uio->uio_resid == 0)
1034 error = 0;
1035
1036 if (error == 0)
1037 getmicrotime(&wpipe->pipe_mtime);
1038
1039 /*
1040 * We have something to offer, wake up select/poll.
  1041  	 * wpipe->pipe_map.cnt is always 0 at this point (direct write
1042 * is only done synchronously), so check only wpipe->pipe_buffer.cnt
1043 */
1044 if (bp->cnt)
1045 pipeselwakeup(wpipe, wpipe, POLL_IN);
1046
1047 /*
1048 * Arrange for next read(2) to do a signal.
1049 */
1050 wpipe->pipe_state |= PIPE_SIGNALR;
1051
1052 pipeunlock(wpipe);
1053 mutex_exit(lock);
1054 return (error);
1055 }
1056
1057 /*
1058 * we implement a very minimal set of ioctls for compatibility with sockets.
1059 */
1060 int
1061 pipe_ioctl(struct file *fp, u_long cmd, void *data)
1062 {
1063 struct pipe *pipe = fp->f_data;
1064 kmutex_t *lock = pipe->pipe_lock;
1065
1066 switch (cmd) {
1067
1068 case FIONBIO:
1069 return (0);
1070
1071 case FIOASYNC:
1072 mutex_enter(lock);
1073 if (*(int *)data) {
1074 pipe->pipe_state |= PIPE_ASYNC;
1075 } else {
1076 pipe->pipe_state &= ~PIPE_ASYNC;
1077 }
1078 mutex_exit(lock);
1079 return (0);
1080
1081 case FIONREAD:
1082 mutex_enter(lock);
1083 #ifndef PIPE_NODIRECT
1084 if (pipe->pipe_state & PIPE_DIRECTW)
1085 *(int *)data = pipe->pipe_map.cnt;
1086 else
1087 #endif
1088 *(int *)data = pipe->pipe_buffer.cnt;
1089 mutex_exit(lock);
1090 return (0);
1091
1092 case FIONWRITE:
1093 /* Look at other side */
1094 pipe = pipe->pipe_peer;
1095 mutex_enter(lock);
1096 #ifndef PIPE_NODIRECT
1097 if (pipe->pipe_state & PIPE_DIRECTW)
1098 *(int *)data = pipe->pipe_map.cnt;
1099 else
1100 #endif
1101 *(int *)data = pipe->pipe_buffer.cnt;
1102 mutex_exit(lock);
1103 return (0);
1104
1105 case FIONSPACE:
1106 /* Look at other side */
1107 pipe = pipe->pipe_peer;
1108 mutex_enter(lock);
1109 #ifndef PIPE_NODIRECT
1110 /*
1111 * If we're in direct-mode, we don't really have a
1112 * send queue, and any other write will block. Thus
1113 * zero seems like the best answer.
1114 */
1115 if (pipe->pipe_state & PIPE_DIRECTW)
1116 *(int *)data = 0;
1117 else
1118 #endif
1119 *(int *)data = pipe->pipe_buffer.size -
1120 pipe->pipe_buffer.cnt;
1121 mutex_exit(lock);
1122 return (0);
1123
1124 case TIOCSPGRP:
1125 case FIOSETOWN:
1126 return fsetown(&pipe->pipe_pgid, cmd, data);
1127
1128 case TIOCGPGRP:
1129 case FIOGETOWN:
1130 return fgetown(pipe->pipe_pgid, cmd, data);
1131
1132 }
1133 return (EPASSTHROUGH);
1134 }
1135
1136 int
1137 pipe_poll(struct file *fp, int events)
1138 {
1139 struct pipe *rpipe = fp->f_data;
1140 struct pipe *wpipe;
1141 int eof = 0;
1142 int revents = 0;
1143
1144 mutex_enter(rpipe->pipe_lock);
1145 wpipe = rpipe->pipe_peer;
1146
1147 if (events & (POLLIN | POLLRDNORM))
1148 if ((rpipe->pipe_buffer.cnt > 0) ||
1149 #ifndef PIPE_NODIRECT
1150 (rpipe->pipe_state & PIPE_DIRECTR) ||
1151 #endif
1152 (rpipe->pipe_state & PIPE_EOF))
1153 revents |= events & (POLLIN | POLLRDNORM);
1154
1155 eof |= (rpipe->pipe_state & PIPE_EOF);
1156
1157 if (wpipe == NULL)
1158 revents |= events & (POLLOUT | POLLWRNORM);
1159 else {
1160 if (events & (POLLOUT | POLLWRNORM))
1161 if ((wpipe->pipe_state & PIPE_EOF) || (
1162 #ifndef PIPE_NODIRECT
1163 (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
1164 #endif
1165 (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
1166 revents |= events & (POLLOUT | POLLWRNORM);
1167
1168 eof |= (wpipe->pipe_state & PIPE_EOF);
1169 }
1170
1171 if (wpipe == NULL || eof)
1172 revents |= POLLHUP;
1173
1174 if (revents == 0) {
1175 if (events & (POLLIN | POLLRDNORM))
1176 selrecord(curlwp, &rpipe->pipe_sel);
1177
1178 if (events & (POLLOUT | POLLWRNORM))
1179 selrecord(curlwp, &wpipe->pipe_sel);
1180 }
1181 mutex_exit(rpipe->pipe_lock);
1182
1183 return (revents);
1184 }
1185
1186 static int
1187 pipe_stat(struct file *fp, struct stat *ub)
1188 {
1189 struct pipe *pipe = fp->f_data;
1190
1191 memset((void *)ub, 0, sizeof(*ub));
1192 ub->st_mode = S_IFIFO | S_IRUSR | S_IWUSR;
1193 ub->st_blksize = pipe->pipe_buffer.size;
1194 if (ub->st_blksize == 0 && pipe->pipe_peer)
1195 ub->st_blksize = pipe->pipe_peer->pipe_buffer.size;
1196 ub->st_size = pipe->pipe_buffer.cnt;
1197 ub->st_blocks = (ub->st_size) ? 1 : 0;
1198 TIMEVAL_TO_TIMESPEC(&pipe->pipe_atime, &ub->st_atimespec);
1199 TIMEVAL_TO_TIMESPEC(&pipe->pipe_mtime, &ub->st_mtimespec);
1200 TIMEVAL_TO_TIMESPEC(&pipe->pipe_ctime, &ub->st_ctimespec);
1201 ub->st_uid = kauth_cred_geteuid(fp->f_cred);
1202 ub->st_gid = kauth_cred_getegid(fp->f_cred);
1203
1204 /*
1205 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
1206 * XXX (st_dev, st_ino) should be unique.
1207 */
1208 return (0);
1209 }
1210
1211 /* ARGSUSED */
1212 static int
1213 pipe_close(struct file *fp)
1214 {
1215 struct pipe *pipe = fp->f_data;
1216
1217 fp->f_data = NULL;
1218 pipeclose(fp, pipe);
1219 return (0);
1220 }
1221
1222 static void
1223 pipe_free_kmem(struct pipe *pipe)
1224 {
1225
1226 if (pipe->pipe_buffer.buffer != NULL) {
1227 if (pipe->pipe_buffer.size > PIPE_SIZE) {
1228 atomic_dec_uint(&nbigpipe);
1229 }
1230 if (pipe->pipe_buffer.buffer != (void *)pipe->pipe_kmem) {
1231 uvm_km_free(kernel_map,
1232 (vaddr_t)pipe->pipe_buffer.buffer,
1233 pipe->pipe_buffer.size, UVM_KMF_PAGEABLE);
1234 atomic_add_int(&amountpipekva,
1235 -pipe->pipe_buffer.size);
1236 }
1237 pipe->pipe_buffer.buffer = NULL;
1238 }
1239 #ifndef PIPE_NODIRECT
1240 if (pipe->pipe_map.kva != 0) {
1241 pipe_loan_free(pipe);
1242 pipe->pipe_map.cnt = 0;
1243 pipe->pipe_map.kva = 0;
1244 pipe->pipe_map.pos = 0;
1245 pipe->pipe_map.npages = 0;
1246 }
1247 #endif /* !PIPE_NODIRECT */
1248 }
1249
1250 /*
  1251  * shut down the pipe
1252 */
1253 static void
1254 pipeclose(struct file *fp, struct pipe *pipe)
1255 {
1256 kmutex_t *lock;
1257 struct pipe *ppipe;
1258
1259 if (pipe == NULL)
1260 return;
1261
1262 KASSERT(cv_is_valid(&pipe->pipe_rcv));
1263 KASSERT(cv_is_valid(&pipe->pipe_wcv));
1264 KASSERT(cv_is_valid(&pipe->pipe_draincv));
1265 KASSERT(cv_is_valid(&pipe->pipe_lkcv));
1266
1267 lock = pipe->pipe_lock;
1268 mutex_enter(lock);
1269 pipeselwakeup(pipe, pipe, POLL_HUP);
1270
1271 /*
1272 * If the other side is blocked, wake it up saying that
1273 * we want to close it down.
1274 */
1275 pipe->pipe_state |= PIPE_EOF;
1276 if (pipe->pipe_busy) {
1277 while (pipe->pipe_busy) {
1278 cv_broadcast(&pipe->pipe_wcv);
1279 cv_wait_sig(&pipe->pipe_draincv, lock);
1280 }
1281 }
1282
1283 /*
1284 * Disconnect from peer
1285 */
1286 if ((ppipe = pipe->pipe_peer) != NULL) {
1287 pipeselwakeup(ppipe, ppipe, POLL_HUP);
1288 ppipe->pipe_state |= PIPE_EOF;
1289 cv_broadcast(&ppipe->pipe_rcv);
1290 ppipe->pipe_peer = NULL;
1291 }
1292
1293 /*
1294 * Any knote objects still left in the list are
  1295  	 * the ones attached by the peer. Since no one will
1296 * traverse this list, we just clear it.
1297 */
1298 SLIST_INIT(&pipe->pipe_sel.sel_klist);
1299
1300 KASSERT((pipe->pipe_state & PIPE_LOCKFL) == 0);
1301 mutex_exit(lock);
1302
1303 /*
1304 * free resources
1305 */
1306 pipe->pipe_pgid = 0;
1307 pipe->pipe_state = PIPE_SIGNALR;
1308 pipe_free_kmem(pipe);
1309 if (pipe->pipe_kmem != 0) {
1310 pool_cache_put(pipe_rd_cache, pipe);
1311 } else {
1312 pool_cache_put(pipe_wr_cache, pipe);
1313 }
1314 mutex_obj_free(lock);
1315 }
1316
1317 static void
1318 filt_pipedetach(struct knote *kn)
1319 {
1320 struct pipe *pipe;
1321 kmutex_t *lock;
1322
1323 pipe = ((file_t *)kn->kn_obj)->f_data;
1324 lock = pipe->pipe_lock;
1325
1326 mutex_enter(lock);
1327
1328 switch(kn->kn_filter) {
1329 case EVFILT_WRITE:
1330 /* need the peer structure, not our own */
1331 pipe = pipe->pipe_peer;
1332
1333 /* if reader end already closed, just return */
1334 if (pipe == NULL) {
1335 mutex_exit(lock);
1336 return;
1337 }
1338
1339 break;
1340 default:
1341 /* nothing to do */
1342 break;
1343 }
1344
1345 #ifdef DIAGNOSTIC
1346 if (kn->kn_hook != pipe)
1347 panic("filt_pipedetach: inconsistent knote");
1348 #endif
1349
1350 SLIST_REMOVE(&pipe->pipe_sel.sel_klist, kn, knote, kn_selnext);
1351 mutex_exit(lock);
1352 }
1353
1354 /*ARGSUSED*/
1355 static int
1356 filt_piperead(struct knote *kn, long hint)
1357 {
1358 struct pipe *rpipe = ((file_t *)kn->kn_obj)->f_data;
1359 struct pipe *wpipe;
1360
1361 if ((hint & NOTE_SUBMIT) == 0) {
1362 mutex_enter(rpipe->pipe_lock);
1363 }
1364 wpipe = rpipe->pipe_peer;
1365 kn->kn_data = rpipe->pipe_buffer.cnt;
1366
1367 if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
1368 kn->kn_data = rpipe->pipe_map.cnt;
1369
1370 if ((rpipe->pipe_state & PIPE_EOF) ||
1371 (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1372 kn->kn_flags |= EV_EOF;
1373 if ((hint & NOTE_SUBMIT) == 0) {
1374 mutex_exit(rpipe->pipe_lock);
1375 }
1376 return (1);
1377 }
1378
1379 if ((hint & NOTE_SUBMIT) == 0) {
1380 mutex_exit(rpipe->pipe_lock);
1381 }
1382 return (kn->kn_data > 0);
1383 }
1384
1385 /*ARGSUSED*/
1386 static int
1387 filt_pipewrite(struct knote *kn, long hint)
1388 {
1389 struct pipe *rpipe = ((file_t *)kn->kn_obj)->f_data;
1390 struct pipe *wpipe;
1391
1392 if ((hint & NOTE_SUBMIT) == 0) {
1393 mutex_enter(rpipe->pipe_lock);
1394 }
1395 wpipe = rpipe->pipe_peer;
1396
1397 if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1398 kn->kn_data = 0;
1399 kn->kn_flags |= EV_EOF;
1400 if ((hint & NOTE_SUBMIT) == 0) {
1401 mutex_exit(rpipe->pipe_lock);
1402 }
1403 return (1);
1404 }
1405 kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1406 if (wpipe->pipe_state & PIPE_DIRECTW)
1407 kn->kn_data = 0;
1408
1409 if ((hint & NOTE_SUBMIT) == 0) {
1410 mutex_exit(rpipe->pipe_lock);
1411 }
1412 return (kn->kn_data >= PIPE_BUF);
1413 }
1414
1415 static const struct filterops pipe_rfiltops =
1416 { 1, NULL, filt_pipedetach, filt_piperead };
1417 static const struct filterops pipe_wfiltops =
1418 { 1, NULL, filt_pipedetach, filt_pipewrite };
1419
1420 /*ARGSUSED*/
1421 static int
1422 pipe_kqfilter(struct file *fp, struct knote *kn)
1423 {
1424 struct pipe *pipe;
1425 kmutex_t *lock;
1426
1427 pipe = ((file_t *)kn->kn_obj)->f_data;
1428 lock = pipe->pipe_lock;
1429
1430 mutex_enter(lock);
1431
1432 switch (kn->kn_filter) {
1433 case EVFILT_READ:
1434 kn->kn_fop = &pipe_rfiltops;
1435 break;
1436 case EVFILT_WRITE:
1437 kn->kn_fop = &pipe_wfiltops;
1438 pipe = pipe->pipe_peer;
1439 if (pipe == NULL) {
1440 /* other end of pipe has been closed */
1441 mutex_exit(lock);
1442 return (EBADF);
1443 }
1444 break;
1445 default:
1446 mutex_exit(lock);
1447 return (EINVAL);
1448 }
1449
1450 kn->kn_hook = pipe;
1451 SLIST_INSERT_HEAD(&pipe->pipe_sel.sel_klist, kn, kn_selnext);
1452 mutex_exit(lock);
1453
1454 return (0);
1455 }
1456
1457 /*
1458 * Handle pipe sysctls.
1459 */
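/*
 * Everything below lands under the kern.pipe node, so the limits above
 * can be inspected and (where read-write) tuned at run time, e.g.:
 *
 *	$ sysctl kern.pipe.maxbigpipes
 *	kern.pipe.maxbigpipes = 32
 */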
1460 SYSCTL_SETUP(sysctl_kern_pipe_setup, "sysctl kern.pipe subtree setup")
1461 {
1462
1463 sysctl_createv(clog, 0, NULL, NULL,
1464 CTLFLAG_PERMANENT,
1465 CTLTYPE_NODE, "kern", NULL,
1466 NULL, 0, NULL, 0,
1467 CTL_KERN, CTL_EOL);
1468 sysctl_createv(clog, 0, NULL, NULL,
1469 CTLFLAG_PERMANENT,
1470 CTLTYPE_NODE, "pipe",
1471 SYSCTL_DESCR("Pipe settings"),
1472 NULL, 0, NULL, 0,
1473 CTL_KERN, KERN_PIPE, CTL_EOL);
1474
1475 sysctl_createv(clog, 0, NULL, NULL,
1476 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1477 CTLTYPE_INT, "maxkvasz",
1478 SYSCTL_DESCR("Maximum amount of kernel memory to be "
1479 "used for pipes"),
1480 NULL, 0, &maxpipekva, 0,
1481 CTL_KERN, KERN_PIPE, KERN_PIPE_MAXKVASZ, CTL_EOL);
1482 sysctl_createv(clog, 0, NULL, NULL,
1483 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1484 CTLTYPE_INT, "maxloankvasz",
1485 SYSCTL_DESCR("Limit for direct transfers via page loan"),
1486 NULL, 0, &limitpipekva, 0,
1487 CTL_KERN, KERN_PIPE, KERN_PIPE_LIMITKVA, CTL_EOL);
1488 sysctl_createv(clog, 0, NULL, NULL,
1489 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1490 CTLTYPE_INT, "maxbigpipes",
1491 SYSCTL_DESCR("Maximum number of \"big\" pipes"),
1492 NULL, 0, &maxbigpipes, 0,
1493 CTL_KERN, KERN_PIPE, KERN_PIPE_MAXBIGPIPES, CTL_EOL);
1494 sysctl_createv(clog, 0, NULL, NULL,
1495 CTLFLAG_PERMANENT,
1496 CTLTYPE_INT, "nbigpipes",
1497 SYSCTL_DESCR("Number of \"big\" pipes"),
1498 NULL, 0, &nbigpipe, 0,
1499 CTL_KERN, KERN_PIPE, KERN_PIPE_NBIGPIPES, CTL_EOL);
1500 sysctl_createv(clog, 0, NULL, NULL,
1501 CTLFLAG_PERMANENT,
1502 CTLTYPE_INT, "kvasize",
1503 SYSCTL_DESCR("Amount of kernel memory consumed by pipe "
1504 "buffers"),
1505 NULL, 0, &amountpipekva, 0,
1506 CTL_KERN, KERN_PIPE, KERN_PIPE_KVASIZE, CTL_EOL);
1507 }