FreeBSD/Linux Kernel Cross Reference
sys/kern/sys_pipe.c
1 /*-
2 * Copyright (c) 1996 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice immediately at the beginning of the file, without modification,
10 * this list of conditions, and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Absolutely no warranty of function or purpose is made by the author
15 * John S. Dyson.
16 * 4. Modifications may be freely made to this file if the above conditions
17 * are met.
18 */
19
20 /*
21 * This file contains a high-performance replacement for the socket-based
22 * pipes scheme originally used in FreeBSD/4.4Lite. It does not support
23 * all features of sockets, but does do everything that pipes normally
24 * do.
25 */
26
27 /*
28 * This code has two modes of operation: a small write mode and a large
29 * write mode. The small write mode acts like conventional pipes with
30 * a kernel buffer. If the buffer is less than PIPE_MINDIRECT, then the
31 * "normal" pipe buffering is done. If the buffer is between PIPE_MINDIRECT
32 * and PIPE_SIZE in size, it is fully mapped and wired into the kernel, and
33 * the receiving process can copy it directly from the pages in the sending
34 * process.
35 *
36 * If the sending process receives a signal, it is possible that it will
37 * go away, and certainly its address space can change, because control
38 * is returned back to the user-mode side. In that case, the pipe code
39 * arranges to copy the buffer supplied by the user process, to a pageable
40 * kernel buffer, and the receiving process will grab the data from the
41 * pageable kernel buffer. Since signals don't happen all that often,
42 * the copy operation is normally eliminated.
43 *
44 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
45 * happen for small transfers so that the system will not spend all of
46 * its time context switching.
47 *
48 * In order to limit the resource use of pipes, two sysctls exist:
49 *
50 * kern.ipc.maxpipekva - This is a hard limit on the amount of pageable
51 * address space available to us in pipe_map. This value is normally
52 * autotuned, but may also be loader tuned.
53 *
54 * kern.ipc.pipekva - This read-only sysctl tracks the current amount of
55 * memory in use by pipes.
56 *
57 * Based on how large pipekva is relative to maxpipekva, the following
58 * will happen:
59 *
60 * 0% - 50%:
61 * New pipes are given 16K of memory backing, pipes may dynamically
62 * grow to as large as 64K where needed.
63 * 50% - 75%:
64 * New pipes are given 4K (or PAGE_SIZE) of memory backing,
65 * existing pipes may NOT grow.
66 * 75% - 100%:
67 * New pipes are given 4K (or PAGE_SIZE) of memory backing,
68 * existing pipes will be shrunk down to 4K whenever possible.
69 *
70 * Resizing may be disabled by setting kern.ipc.piperesizeallowed=0. If
71 * that is set, the only resize that will occur is the 0 -> SMALL_PIPE_SIZE
72 * resize which MUST occur for reverse-direction pipes when they are
73 * first used.
74 *
75 * Additional information about the current state of pipes may be obtained
76 * from kern.ipc.pipes, kern.ipc.pipefragretry, kern.ipc.pipeallocfail,
77 * and kern.ipc.piperesizefail.
78 *
79 * Locking rules: There are two locks present here: A mutex, used via
80 * PIPE_LOCK, and a flag, used via pipelock(). All locking is done via
81 * the flag, as mutexes cannot persist over uiomove. The mutex
82 * exists only to guard access to the flag, and is not in itself a
83 * locking mechanism. Also note that there is only a single mutex for
84 * both directions of a pipe.
85 *
86 * As pipelock() may have to sleep before it can acquire the flag, it
87 * is important to reread all data after a call to pipelock(); everything
88 * in the structure may have changed.
89 */
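/*
 * A minimal userland analog of the pipelock()/pipeunlock() pattern
 * described above, using a pthread mutex in place of PIPE_LOCK and a
 * condition variable in place of msleep()/wakeup().  This is only a
 * sketch with hypothetical names (xlock, xunlock), guarded out of the
 * build; it illustrates the "mutex guards the flag, the flag is the
 * lock" design, not the kernel implementation itself.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t	xmtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	xcv = PTHREAD_COND_INITIALIZER;
static int		xlocked;	/* analog of PIPE_LOCKFL */

static void
xlock(void)
{
	pthread_mutex_lock(&xmtx);		/* analog of PIPE_LOCK */
	while (xlocked)				/* may sleep a long time */
		pthread_cond_wait(&xcv, &xmtx);
	xlocked = 1;
	pthread_mutex_unlock(&xmtx);
	/* Caller must now re-read any state sampled before the wait. */
}

static void
xunlock(void)
{
	pthread_mutex_lock(&xmtx);
	xlocked = 0;
	pthread_cond_signal(&xcv);		/* analog of wakeup() */
	pthread_mutex_unlock(&xmtx);
}
#endif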
90
91 #include <sys/cdefs.h>
92 __FBSDID("$FreeBSD$");
93
94 #include "opt_mac.h"
95
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/fcntl.h>
99 #include <sys/file.h>
100 #include <sys/filedesc.h>
101 #include <sys/filio.h>
102 #include <sys/kernel.h>
103 #include <sys/lock.h>
104 #include <sys/mutex.h>
105 #include <sys/ttycom.h>
106 #include <sys/stat.h>
107 #include <sys/malloc.h>
108 #include <sys/poll.h>
109 #include <sys/selinfo.h>
110 #include <sys/signalvar.h>
111 #include <sys/sysctl.h>
112 #include <sys/sysproto.h>
113 #include <sys/pipe.h>
114 #include <sys/proc.h>
115 #include <sys/vnode.h>
116 #include <sys/uio.h>
117 #include <sys/event.h>
118
119 #include <security/mac/mac_framework.h>
120
121 #include <vm/vm.h>
122 #include <vm/vm_param.h>
123 #include <vm/vm_object.h>
124 #include <vm/vm_kern.h>
125 #include <vm/vm_extern.h>
126 #include <vm/pmap.h>
127 #include <vm/vm_map.h>
128 #include <vm/vm_page.h>
129 #include <vm/uma.h>
130
131 /*
132 * Use this define if you want to disable *fancy* VM things. Expect an
133 * approx 30% decrease in transfer rate. This could be useful for
134 * NetBSD or OpenBSD.
135 */
136 /* #define PIPE_NODIRECT */
137
138 /*
139 * interfaces to the outside world
140 */
141 static fo_rdwr_t pipe_read;
142 static fo_rdwr_t pipe_write;
143 static fo_ioctl_t pipe_ioctl;
144 static fo_poll_t pipe_poll;
145 static fo_kqfilter_t pipe_kqfilter;
146 static fo_stat_t pipe_stat;
147 static fo_close_t pipe_close;
148
149 static struct fileops pipeops = {
150 .fo_read = pipe_read,
151 .fo_write = pipe_write,
152 .fo_ioctl = pipe_ioctl,
153 .fo_poll = pipe_poll,
154 .fo_kqfilter = pipe_kqfilter,
155 .fo_stat = pipe_stat,
156 .fo_close = pipe_close,
157 .fo_flags = DFLAG_PASSABLE
158 };
159
160 static void filt_pipedetach(struct knote *kn);
161 static int filt_piperead(struct knote *kn, long hint);
162 static int filt_pipewrite(struct knote *kn, long hint);
163
164 static struct filterops pipe_rfiltops =
165 { 1, NULL, filt_pipedetach, filt_piperead };
166 static struct filterops pipe_wfiltops =
167 { 1, NULL, filt_pipedetach, filt_pipewrite };
168
169 /*
170 * Default pipe buffer size(s); these can be fairly large now because pipe
171 * space is pageable. The pipe code will try to maintain locality of
172 * reference for performance reasons, so small amounts of outstanding I/O
173 * will not wipe the cache.
174 */
175 #define MINPIPESIZE (PIPE_SIZE/3)
176 #define MAXPIPESIZE (2*PIPE_SIZE/3)
177
178 static int amountpipekva;
179 static int pipefragretry;
180 static int pipeallocfail;
181 static int piperesizefail;
182 static int piperesizeallowed = 1;
183
184 SYSCTL_INT(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RDTUN,
185 &maxpipekva, 0, "Pipe KVA limit");
186 SYSCTL_INT(_kern_ipc, OID_AUTO, pipekva, CTLFLAG_RD,
187 &amountpipekva, 0, "Pipe KVA usage");
188 SYSCTL_INT(_kern_ipc, OID_AUTO, pipefragretry, CTLFLAG_RD,
189 &pipefragretry, 0, "Pipe allocation retries due to fragmentation");
190 SYSCTL_INT(_kern_ipc, OID_AUTO, pipeallocfail, CTLFLAG_RD,
191 &pipeallocfail, 0, "Pipe allocation failures");
192 SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizefail, CTLFLAG_RD,
193 &piperesizefail, 0, "Pipe resize failures");
194 SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizeallowed, CTLFLAG_RW,
195 &piperesizeallowed, 0, "Pipe resizing allowed");
196
197 static void pipeinit(void *dummy __unused);
198 static void pipeclose(struct pipe *cpipe);
199 static void pipe_free_kmem(struct pipe *cpipe);
200 static int pipe_create(struct pipe *pipe, int backing);
201 static __inline int pipelock(struct pipe *cpipe, int catch);
202 static __inline void pipeunlock(struct pipe *cpipe);
203 static __inline void pipeselwakeup(struct pipe *cpipe);
204 #ifndef PIPE_NODIRECT
205 static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
206 static void pipe_destroy_write_buffer(struct pipe *wpipe);
207 static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
208 static void pipe_clone_write_buffer(struct pipe *wpipe);
209 #endif
210 static int pipespace(struct pipe *cpipe, int size);
211 static int pipespace_new(struct pipe *cpipe, int size);
212
213 static int pipe_zone_ctor(void *mem, int size, void *arg, int flags);
214 static int pipe_zone_init(void *mem, int size, int flags);
215 static void pipe_zone_fini(void *mem, int size);
216
217 static uma_zone_t pipe_zone;
218
219 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);
220
221 static void
222 pipeinit(void *dummy __unused)
223 {
224
225 pipe_zone = uma_zcreate("pipe", sizeof(struct pipepair),
226 pipe_zone_ctor, NULL, pipe_zone_init, pipe_zone_fini,
227 UMA_ALIGN_PTR, 0);
228 KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
229 }
230
231 static int
232 pipe_zone_ctor(void *mem, int size, void *arg, int flags)
233 {
234 struct pipepair *pp;
235 struct pipe *rpipe, *wpipe;
236
237 KASSERT(size == sizeof(*pp), ("pipe_zone_ctor: wrong size"));
238
239 pp = (struct pipepair *)mem;
240
241 /*
242 * We zero both pipe endpoints to make sure all the kmem pointers
243 * are NULL, flag fields are zero'd, etc. We timestamp both
244 * endpoints with the same time.
245 */
246 rpipe = &pp->pp_rpipe;
247 bzero(rpipe, sizeof(*rpipe));
248 vfs_timestamp(&rpipe->pipe_ctime);
249 rpipe->pipe_atime = rpipe->pipe_mtime = rpipe->pipe_ctime;
250
251 wpipe = &pp->pp_wpipe;
252 bzero(wpipe, sizeof(*wpipe));
253 wpipe->pipe_ctime = rpipe->pipe_ctime;
254 wpipe->pipe_atime = wpipe->pipe_mtime = rpipe->pipe_ctime;
255
256 rpipe->pipe_peer = wpipe;
257 rpipe->pipe_pair = pp;
258 wpipe->pipe_peer = rpipe;
259 wpipe->pipe_pair = pp;
260
261 /*
262 * Mark both endpoints as present; they will later get free'd
263 * one at a time. When both are free'd, then the whole pair
264 * is released.
265 */
266 rpipe->pipe_present = PIPE_ACTIVE;
267 wpipe->pipe_present = PIPE_ACTIVE;
268
269 /*
270 * Eventually, the MAC Framework may initialize the label
271 * in ctor or init, but for now we do it elsewhere to avoid
272 * blocking in ctor or init.
273 */
274 pp->pp_label = NULL;
275
276 return (0);
277 }
278
279 static int
280 pipe_zone_init(void *mem, int size, int flags)
281 {
282 struct pipepair *pp;
283
284 KASSERT(size == sizeof(*pp), ("pipe_zone_init: wrong size"));
285
286 pp = (struct pipepair *)mem;
287
288 mtx_init(&pp->pp_mtx, "pipe mutex", NULL, MTX_DEF | MTX_RECURSE);
289 return (0);
290 }
291
292 static void
293 pipe_zone_fini(void *mem, int size)
294 {
295 struct pipepair *pp;
296
297 KASSERT(size == sizeof(*pp), ("pipe_zone_fini: wrong size"));
298
299 pp = (struct pipepair *)mem;
300
301 mtx_destroy(&pp->pp_mtx);
302 }
303
304 /*
305 * The pipe system call for the DTYPE_PIPE type of pipes. If we fail, let
306 * the zone pick up the pieces via pipeclose().
307 */
308 /* ARGSUSED */
309 int
310 pipe(td, uap)
311 struct thread *td;
312 struct pipe_args /* {
313 int dummy;
314 } */ *uap;
315 {
316 struct filedesc *fdp = td->td_proc->p_fd;
317 struct file *rf, *wf;
318 struct pipepair *pp;
319 struct pipe *rpipe, *wpipe;
320 int fd, error;
321
322 pp = uma_zalloc(pipe_zone, M_WAITOK);
323 #ifdef MAC
324 /*
325 * The MAC label is shared between the connected endpoints. As a
326 * result mac_init_pipe() and mac_create_pipe() are called once
327 * for the pair, and not on the endpoints.
328 */
329 mac_init_pipe(pp);
330 mac_create_pipe(td->td_ucred, pp);
331 #endif
332 rpipe = &pp->pp_rpipe;
333 wpipe = &pp->pp_wpipe;
334
335 knlist_init(&rpipe->pipe_sel.si_note, PIPE_MTX(rpipe), NULL, NULL,
336 NULL);
337 knlist_init(&wpipe->pipe_sel.si_note, PIPE_MTX(wpipe), NULL, NULL,
338 NULL);
339
340 /* Only the forward direction pipe is backed by default */
341 if ((error = pipe_create(rpipe, 1)) != 0 ||
342 (error = pipe_create(wpipe, 0)) != 0) {
343 pipeclose(rpipe);
344 pipeclose(wpipe);
345 return (error);
346 }
347
348 rpipe->pipe_state |= PIPE_DIRECTOK;
349 wpipe->pipe_state |= PIPE_DIRECTOK;
350
351 error = falloc(td, &rf, &fd);
352 if (error) {
353 pipeclose(rpipe);
354 pipeclose(wpipe);
355 return (error);
356 }
357 /* An extra reference on `rf' has been held for us by falloc(). */
358 td->td_retval[0] = fd;
359
360 /*
361 * Warning: once we've gotten past allocation of the fd for the
362 * read-side, we can only drop the read side via fdrop() in order
363 * to avoid races against processes which manage to dup() the read
364 * side while we are blocked trying to allocate the write side.
365 */
366 FILE_LOCK(rf);
367 rf->f_flag = FREAD | FWRITE;
368 rf->f_type = DTYPE_PIPE;
369 rf->f_data = rpipe;
370 rf->f_ops = &pipeops;
371 FILE_UNLOCK(rf);
372 error = falloc(td, &wf, &fd);
373 if (error) {
374 fdclose(fdp, rf, td->td_retval[0], td);
375 fdrop(rf, td);
376 /* rpipe has been closed by fdrop(). */
377 pipeclose(wpipe);
378 return (error);
379 }
380 /* An extra reference on `wf' has been held for us by falloc(). */
381 FILE_LOCK(wf);
382 wf->f_flag = FREAD | FWRITE;
383 wf->f_type = DTYPE_PIPE;
384 wf->f_data = wpipe;
385 wf->f_ops = &pipeops;
386 FILE_UNLOCK(wf);
387 fdrop(wf, td);
388 td->td_retval[1] = fd;
389 fdrop(rf, td);
390
391 return (0);
392 }
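/*
 * A minimal userland sketch of the syscall above, assuming only the
 * standard pipe(2) interface: fd[0] maps to the read endpoint (rpipe),
 * fd[1] to the write endpoint (wpipe).  Guarded out; compile it as a
 * standalone program to try it.
 */
#if 0
#include <sys/wait.h>
#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd[2];
	char buf[32];
	ssize_t n;
	pid_t pid;

	if (pipe(fd) == -1)
		err(1, "pipe");
	if ((pid = fork()) == -1)
		err(1, "fork");
	if (pid == 0) {			/* child: writer */
		close(fd[0]);
		write(fd[1], "hello", 5);
		_exit(0);
	}
	close(fd[1]);			/* parent: reader */
	n = read(fd[0], buf, sizeof(buf));
	if (n > 0)
		printf("read %zd bytes: %.*s\n", n, (int)n, buf);
	waitpid(pid, NULL, 0);
	return (0);
}
#endif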
393
394 /*
395 * Allocate kva for the pipe circular buffer; the space is pageable.
396 * This routine will 'realloc' the size of a pipe safely: if the
397 * allocation fails, it will retain the old buffer
398 * and return ENOMEM.
399 */
400 static int
401 pipespace_new(cpipe, size)
402 struct pipe *cpipe;
403 int size;
404 {
405 caddr_t buffer;
406 int error, cnt, firstseg;
407 static int curfail = 0;
408 static struct timeval lastfail;
409
410 KASSERT(!mtx_owned(PIPE_MTX(cpipe)), ("pipespace: pipe mutex locked"));
411 KASSERT(!(cpipe->pipe_state & PIPE_DIRECTW),
412 ("pipespace: resize of direct writes not allowed"));
413 retry:
414 cnt = cpipe->pipe_buffer.cnt;
415 if (cnt > size)
416 size = cnt;
417
418 size = round_page(size);
419 buffer = (caddr_t) vm_map_min(pipe_map);
420
421 error = vm_map_find(pipe_map, NULL, 0,
422 (vm_offset_t *) &buffer, size, 1,
423 VM_PROT_ALL, VM_PROT_ALL, 0);
424 if (error != KERN_SUCCESS) {
425 if ((cpipe->pipe_buffer.buffer == NULL) &&
426 (size > SMALL_PIPE_SIZE)) {
427 size = SMALL_PIPE_SIZE;
428 pipefragretry++;
429 goto retry;
430 }
431 if (cpipe->pipe_buffer.buffer == NULL) {
432 pipeallocfail++;
433 if (ppsratecheck(&lastfail, &curfail, 1))
434 printf("kern.ipc.maxpipekva exceeded; see tuning(7)\n");
435 } else {
436 piperesizefail++;
437 }
438 return (ENOMEM);
439 }
440
441 /* copy data, then free old resources if we're resizing */
442 if (cnt > 0) {
443 if (cpipe->pipe_buffer.in <= cpipe->pipe_buffer.out) {
444 firstseg = cpipe->pipe_buffer.size - cpipe->pipe_buffer.out;
445 bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
446 buffer, firstseg);
447 if ((cnt - firstseg) > 0)
448 bcopy(cpipe->pipe_buffer.buffer, &buffer[firstseg],
449 cpipe->pipe_buffer.in);
450 } else {
451 bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
452 buffer, cnt);
453 }
454 }
455 pipe_free_kmem(cpipe);
456 cpipe->pipe_buffer.buffer = buffer;
457 cpipe->pipe_buffer.size = size;
458 cpipe->pipe_buffer.in = cnt;
459 cpipe->pipe_buffer.out = 0;
460 cpipe->pipe_buffer.cnt = cnt;
461 atomic_add_int(&amountpipekva, cpipe->pipe_buffer.size);
462 return (0);
463 }
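/*
 * The copy-on-resize logic above linearizes the circular buffer into
 * the new allocation.  The following standalone sketch (hypothetical
 * 'struct ring' and ring_unwrap(); guarded out) shows the same
 * computation: when in <= out and cnt > 0 the data wraps, so the tail
 * segment [out, size) is copied first, then the head segment [0, in).
 */
#if 0
#include <string.h>

struct ring {
	char	*buf;
	int	 size;		/* capacity */
	int	 in;		/* next write offset */
	int	 out;		/* next read offset */
	int	 cnt;		/* bytes stored */
};

static void
ring_unwrap(const struct ring *r, char *dst)
{
	int firstseg;

	if (r->cnt == 0)
		return;
	if (r->in <= r->out) {			/* wrapped */
		firstseg = r->size - r->out;
		memcpy(dst, r->buf + r->out, firstseg);
		if (r->cnt - firstseg > 0)
			memcpy(dst + firstseg, r->buf, r->in);
	} else {				/* contiguous */
		memcpy(dst, r->buf + r->out, r->cnt);
	}
}
#endif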
464
465 /*
466 * Wrapper for pipespace_new() that performs locking assertions.
467 */
468 static int
469 pipespace(cpipe, size)
470 struct pipe *cpipe;
471 int size;
472 {
473
474 KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
475 ("Unlocked pipe passed to pipespace"));
476 return (pipespace_new(cpipe, size));
477 }
478
479 /*
480 * lock a pipe for I/O, blocking other access
481 */
482 static __inline int
483 pipelock(cpipe, catch)
484 struct pipe *cpipe;
485 int catch;
486 {
487 int error;
488
489 PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
490 while (cpipe->pipe_state & PIPE_LOCKFL) {
491 cpipe->pipe_state |= PIPE_LWANT;
492 error = msleep(cpipe, PIPE_MTX(cpipe),
493 catch ? (PRIBIO | PCATCH) : PRIBIO,
494 "pipelk", 0);
495 if (error != 0)
496 return (error);
497 }
498 cpipe->pipe_state |= PIPE_LOCKFL;
499 return (0);
500 }
501
502 /*
503 * unlock a pipe I/O lock
504 */
505 static __inline void
506 pipeunlock(cpipe)
507 struct pipe *cpipe;
508 {
509
510 PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
511 KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
512 ("Unlocked pipe passed to pipeunlock"));
513 cpipe->pipe_state &= ~PIPE_LOCKFL;
514 if (cpipe->pipe_state & PIPE_LWANT) {
515 cpipe->pipe_state &= ~PIPE_LWANT;
516 wakeup(cpipe);
517 }
518 }
519
520 static __inline void
521 pipeselwakeup(cpipe)
522 struct pipe *cpipe;
523 {
524
525 PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
526 if (cpipe->pipe_state & PIPE_SEL) {
527 cpipe->pipe_state &= ~PIPE_SEL;
528 selwakeuppri(&cpipe->pipe_sel, PSOCK);
529 }
530 if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
531 pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
532 KNOTE_LOCKED(&cpipe->pipe_sel.si_note, 0);
533 }
534
535 /*
536 * Initialize and allocate VM and memory for pipe. The structure
537 * will start out zero'd from the ctor, so we just manage the kmem.
538 */
539 static int
540 pipe_create(pipe, backing)
541 struct pipe *pipe;
542 int backing;
543 {
544 int error;
545
546 if (backing) {
547 if (amountpipekva > maxpipekva / 2)
548 error = pipespace_new(pipe, SMALL_PIPE_SIZE);
549 else
550 error = pipespace_new(pipe, PIPE_SIZE);
551 } else {
552 /* If we're not backing this pipe, no need to do anything. */
553 error = 0;
554 }
555 return (error);
556 }
557
558 /* ARGSUSED */
559 static int
560 pipe_read(fp, uio, active_cred, flags, td)
561 struct file *fp;
562 struct uio *uio;
563 struct ucred *active_cred;
564 struct thread *td;
565 int flags;
566 {
567 struct pipe *rpipe = fp->f_data;
568 int error;
569 int nread = 0;
570 u_int size;
571
572 PIPE_LOCK(rpipe);
573 ++rpipe->pipe_busy;
574 error = pipelock(rpipe, 1);
575 if (error)
576 goto unlocked_error;
577
578 #ifdef MAC
579 error = mac_check_pipe_read(active_cred, rpipe->pipe_pair);
580 if (error)
581 goto locked_error;
582 #endif
583 if (amountpipekva > (3 * maxpipekva) / 4) {
584 if (!(rpipe->pipe_state & PIPE_DIRECTW) &&
585 (rpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
586 (rpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
587 (piperesizeallowed == 1)) {
588 PIPE_UNLOCK(rpipe);
589 pipespace(rpipe, SMALL_PIPE_SIZE);
590 PIPE_LOCK(rpipe);
591 }
592 }
593
594 while (uio->uio_resid) {
595 /*
596 * normal pipe buffer receive
597 */
598 if (rpipe->pipe_buffer.cnt > 0) {
599 size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
600 if (size > rpipe->pipe_buffer.cnt)
601 size = rpipe->pipe_buffer.cnt;
602 if (size > (u_int) uio->uio_resid)
603 size = (u_int) uio->uio_resid;
604
605 PIPE_UNLOCK(rpipe);
606 error = uiomove(
607 &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
608 size, uio);
609 PIPE_LOCK(rpipe);
610 if (error)
611 break;
612
613 rpipe->pipe_buffer.out += size;
614 if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
615 rpipe->pipe_buffer.out = 0;
616
617 rpipe->pipe_buffer.cnt -= size;
618
619 /*
620 * If there is no more to read in the pipe, reset
621 * its pointers to the beginning. This improves
622 * cache hit stats.
623 */
624 if (rpipe->pipe_buffer.cnt == 0) {
625 rpipe->pipe_buffer.in = 0;
626 rpipe->pipe_buffer.out = 0;
627 }
628 nread += size;
629 #ifndef PIPE_NODIRECT
630 /*
631 * Direct copy, bypassing a kernel buffer.
632 */
633 } else if ((size = rpipe->pipe_map.cnt) &&
634 (rpipe->pipe_state & PIPE_DIRECTW)) {
635 if (size > (u_int) uio->uio_resid)
636 size = (u_int) uio->uio_resid;
637
638 PIPE_UNLOCK(rpipe);
639 error = uiomove_fromphys(rpipe->pipe_map.ms,
640 rpipe->pipe_map.pos, size, uio);
641 PIPE_LOCK(rpipe);
642 if (error)
643 break;
644 nread += size;
645 rpipe->pipe_map.pos += size;
646 rpipe->pipe_map.cnt -= size;
647 if (rpipe->pipe_map.cnt == 0) {
648 rpipe->pipe_state &= ~PIPE_DIRECTW;
649 wakeup(rpipe);
650 }
651 #endif
652 } else {
653 /*
654 * detect EOF condition
655 * read returns 0 on EOF, no need to set error
656 */
657 if (rpipe->pipe_state & PIPE_EOF)
658 break;
659
660 /*
661 * If the "write-side" has been blocked, wake it up now.
662 */
663 if (rpipe->pipe_state & PIPE_WANTW) {
664 rpipe->pipe_state &= ~PIPE_WANTW;
665 wakeup(rpipe);
666 }
667
668 /*
669 * Break if some data was read.
670 */
671 if (nread > 0)
672 break;
673
674 /*
675 * Unlock the pipe buffer for our remaining processing.
676 * We will either break out with an error or we will
677 * sleep and relock to loop.
678 */
679 pipeunlock(rpipe);
680
681 /*
682 * Handle non-blocking mode operation or
683 * wait for more data.
684 */
685 if (fp->f_flag & FNONBLOCK) {
686 error = EAGAIN;
687 } else {
688 rpipe->pipe_state |= PIPE_WANTR;
689 if ((error = msleep(rpipe, PIPE_MTX(rpipe),
690 PRIBIO | PCATCH,
691 "piperd", 0)) == 0)
692 error = pipelock(rpipe, 1);
693 }
694 if (error)
695 goto unlocked_error;
696 }
697 }
698 #ifdef MAC
699 locked_error:
700 #endif
701 pipeunlock(rpipe);
702
703 /* XXX: should probably do this before getting any locks. */
704 if (error == 0)
705 vfs_timestamp(&rpipe->pipe_atime);
706 unlocked_error:
707 --rpipe->pipe_busy;
708
709 /*
710 * PIPE_WANT processing only makes sense if pipe_busy is 0.
711 */
712 if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
713 rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
714 wakeup(rpipe);
715 } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
716 /*
717 * Handle write blocking hysteresis.
718 */
719 if (rpipe->pipe_state & PIPE_WANTW) {
720 rpipe->pipe_state &= ~PIPE_WANTW;
721 wakeup(rpipe);
722 }
723 }
724
725 if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
726 pipeselwakeup(rpipe);
727
728 PIPE_UNLOCK(rpipe);
729 return (error);
730 }
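/*
 * Two pipe_read() outcomes above can be observed from userland: EAGAIN
 * when the pipe is empty and FNONBLOCK is set, and a 0-byte return
 * (EOF, not an error) once the write side is gone.  A minimal sketch,
 * assuming only standard interfaces; guarded out.
 */
#if 0
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd[2];
	char c;
	ssize_t n;

	if (pipe(fd) == -1)
		err(1, "pipe");
	fcntl(fd[0], F_SETFL, O_NONBLOCK);
	n = read(fd[0], &c, 1);		/* empty pipe, writer open */
	if (n == -1 && errno == EAGAIN)
		printf("empty pipe: EAGAIN\n");
	close(fd[1]);			/* drop the write side */
	n = read(fd[0], &c, 1);		/* PIPE_EOF: returns 0 */
	printf("after writer close: read() = %zd\n", n);
	return (0);
}
#endif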
731
732 #ifndef PIPE_NODIRECT
733 /*
734 * Map the sending process's buffer into kernel space and wire it.
735 * This is similar to a physical write operation.
736 */
737 static int
738 pipe_build_write_buffer(wpipe, uio)
739 struct pipe *wpipe;
740 struct uio *uio;
741 {
742 pmap_t pmap;
743 u_int size;
744 int i, j;
745 vm_offset_t addr, endaddr;
746
747 PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
748 KASSERT(wpipe->pipe_state & PIPE_DIRECTW,
749 ("Clone attempt on non-direct write pipe!"));
750
751 size = (u_int) uio->uio_iov->iov_len;
752 if (size > wpipe->pipe_buffer.size)
753 size = wpipe->pipe_buffer.size;
754
755 pmap = vmspace_pmap(curproc->p_vmspace);
756 endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
757 addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
758 if (endaddr < addr)
759 return (EFAULT);
760 for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
761 /*
762 * vm_fault_quick() can sleep. Consequently,
763 * vm_page_lock_queue() and vm_page_unlock_queue()
764 * should not be performed outside of this loop.
765 */
766 race:
767 if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0) {
768 vm_page_lock_queues();
769 for (j = 0; j < i; j++)
770 vm_page_unhold(wpipe->pipe_map.ms[j]);
771 vm_page_unlock_queues();
772 return (EFAULT);
773 }
774 wpipe->pipe_map.ms[i] = pmap_extract_and_hold(pmap, addr,
775 VM_PROT_READ);
776 if (wpipe->pipe_map.ms[i] == NULL)
777 goto race;
778 }
779
780 /*
781 * set up the control block
782 */
783 wpipe->pipe_map.npages = i;
784 wpipe->pipe_map.pos =
785 ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
786 wpipe->pipe_map.cnt = size;
787
788 /*
789 * and update the uio data
790 */
791
792 uio->uio_iov->iov_len -= size;
793 uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
794 if (uio->uio_iov->iov_len == 0)
795 uio->uio_iov++;
796 uio->uio_resid -= size;
797 uio->uio_offset += size;
798 return (0);
799 }
800
801 /*
802 * unmap and unwire the process buffer
803 */
804 static void
805 pipe_destroy_write_buffer(wpipe)
806 struct pipe *wpipe;
807 {
808 int i;
809
810 PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
811 vm_page_lock_queues();
812 for (i = 0; i < wpipe->pipe_map.npages; i++) {
813 vm_page_unhold(wpipe->pipe_map.ms[i]);
814 }
815 vm_page_unlock_queues();
816 wpipe->pipe_map.npages = 0;
817 }
818
819 /*
820 * In the case of a signal, the writing process might go away. This
821 * code copies the data into the circular buffer so that the source
822 * pages can be freed without loss of data.
823 */
824 static void
825 pipe_clone_write_buffer(wpipe)
826 struct pipe *wpipe;
827 {
828 struct uio uio;
829 struct iovec iov;
830 int size;
831 int pos;
832
833 PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
834 size = wpipe->pipe_map.cnt;
835 pos = wpipe->pipe_map.pos;
836
837 wpipe->pipe_buffer.in = size;
838 wpipe->pipe_buffer.out = 0;
839 wpipe->pipe_buffer.cnt = size;
840 wpipe->pipe_state &= ~PIPE_DIRECTW;
841
842 PIPE_UNLOCK(wpipe);
843 iov.iov_base = wpipe->pipe_buffer.buffer;
844 iov.iov_len = size;
845 uio.uio_iov = &iov;
846 uio.uio_iovcnt = 1;
847 uio.uio_offset = 0;
848 uio.uio_resid = size;
849 uio.uio_segflg = UIO_SYSSPACE;
850 uio.uio_rw = UIO_READ;
851 uio.uio_td = curthread;
852 uiomove_fromphys(wpipe->pipe_map.ms, pos, size, &uio);
853 PIPE_LOCK(wpipe);
854 pipe_destroy_write_buffer(wpipe);
855 }
856
857 /*
858 * This implements the pipe buffer write mechanism. Note that only
859 * a direct write OR a normal pipe write can be pending at any given time.
860 * If there are any characters in the pipe buffer, the direct write will
861 * be deferred until the receiving process grabs all of the bytes from
862 * the pipe buffer. Then the direct mapping write is set-up.
863 */
864 static int
865 pipe_direct_write(wpipe, uio)
866 struct pipe *wpipe;
867 struct uio *uio;
868 {
869 int error;
870
871 retry:
872 PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
873 error = pipelock(wpipe, 1);
874 if (wpipe->pipe_state & PIPE_EOF)
875 error = EPIPE;
876 if (error) {
877 pipeunlock(wpipe);
878 goto error1;
879 }
880 while (wpipe->pipe_state & PIPE_DIRECTW) {
881 if (wpipe->pipe_state & PIPE_WANTR) {
882 wpipe->pipe_state &= ~PIPE_WANTR;
883 wakeup(wpipe);
884 }
885 pipeselwakeup(wpipe);
886 wpipe->pipe_state |= PIPE_WANTW;
887 pipeunlock(wpipe);
888 error = msleep(wpipe, PIPE_MTX(wpipe),
889 PRIBIO | PCATCH, "pipdww", 0);
890 if (error)
891 goto error1;
892 else
893 goto retry;
894 }
895 wpipe->pipe_map.cnt = 0; /* transfer not ready yet */
896 if (wpipe->pipe_buffer.cnt > 0) {
897 if (wpipe->pipe_state & PIPE_WANTR) {
898 wpipe->pipe_state &= ~PIPE_WANTR;
899 wakeup(wpipe);
900 }
901 pipeselwakeup(wpipe);
902 wpipe->pipe_state |= PIPE_WANTW;
903 pipeunlock(wpipe);
904 error = msleep(wpipe, PIPE_MTX(wpipe),
905 PRIBIO | PCATCH, "pipdwc", 0);
906 if (error)
907 goto error1;
908 else
909 goto retry;
910 }
911
912 wpipe->pipe_state |= PIPE_DIRECTW;
913
914 PIPE_UNLOCK(wpipe);
915 error = pipe_build_write_buffer(wpipe, uio);
916 PIPE_LOCK(wpipe);
917 if (error) {
918 wpipe->pipe_state &= ~PIPE_DIRECTW;
919 pipeunlock(wpipe);
920 goto error1;
921 }
922
923 error = 0;
924 while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
925 if (wpipe->pipe_state & PIPE_EOF) {
926 pipe_destroy_write_buffer(wpipe);
927 pipeselwakeup(wpipe);
928 pipeunlock(wpipe);
929 error = EPIPE;
930 goto error1;
931 }
932 if (wpipe->pipe_state & PIPE_WANTR) {
933 wpipe->pipe_state &= ~PIPE_WANTR;
934 wakeup(wpipe);
935 }
936 pipeselwakeup(wpipe);
937 pipeunlock(wpipe);
938 error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
939 "pipdwt", 0);
940 pipelock(wpipe, 0);
941 }
942
943 if (wpipe->pipe_state & PIPE_EOF)
944 error = EPIPE;
945 if (wpipe->pipe_state & PIPE_DIRECTW) {
946 /*
947 * this bit of trickery substitutes a kernel buffer for
948 * the process that might be going away.
949 */
950 pipe_clone_write_buffer(wpipe);
951 } else {
952 pipe_destroy_write_buffer(wpipe);
953 }
954 pipeunlock(wpipe);
955 return (error);
956
957 error1:
958 wakeup(wpipe);
959 return (error);
960 }
961 #endif
962
963 static int
964 pipe_write(fp, uio, active_cred, flags, td)
965 struct file *fp;
966 struct uio *uio;
967 struct ucred *active_cred;
968 struct thread *td;
969 int flags;
970 {
971 int error = 0;
972 int desiredsize, orig_resid;
973 struct pipe *wpipe, *rpipe;
974
975 rpipe = fp->f_data;
976 wpipe = rpipe->pipe_peer;
977
978 PIPE_LOCK(rpipe);
979 error = pipelock(wpipe, 1);
980 if (error) {
981 PIPE_UNLOCK(rpipe);
982 return (error);
983 }
984 /*
985 * detect loss of pipe read side, issue SIGPIPE if lost.
986 */
987 if (wpipe->pipe_present != PIPE_ACTIVE ||
988 (wpipe->pipe_state & PIPE_EOF)) {
989 pipeunlock(wpipe);
990 PIPE_UNLOCK(rpipe);
991 return (EPIPE);
992 }
993 #ifdef MAC
994 error = mac_check_pipe_write(active_cred, wpipe->pipe_pair);
995 if (error) {
996 pipeunlock(wpipe);
997 PIPE_UNLOCK(rpipe);
998 return (error);
999 }
1000 #endif
1001 ++wpipe->pipe_busy;
1002
1003 /* Choose a larger size if it's advantageous */
1004 desiredsize = max(SMALL_PIPE_SIZE, wpipe->pipe_buffer.size);
1005 while (desiredsize < wpipe->pipe_buffer.cnt + uio->uio_resid) {
1006 if (piperesizeallowed != 1)
1007 break;
1008 if (amountpipekva > maxpipekva / 2)
1009 break;
1010 if (desiredsize == BIG_PIPE_SIZE)
1011 break;
1012 desiredsize = desiredsize * 2;
1013 }
1014
1015 /* Choose a smaller size if we're in an OOM situation */
1016 if ((amountpipekva > (3 * maxpipekva) / 4) &&
1017 (wpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
1018 (wpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
1019 (piperesizeallowed == 1))
1020 desiredsize = SMALL_PIPE_SIZE;
1021
1022 /* Resize if the above determined that a new size was necessary */
1023 if ((desiredsize != wpipe->pipe_buffer.size) &&
1024 ((wpipe->pipe_state & PIPE_DIRECTW) == 0)) {
1025 PIPE_UNLOCK(wpipe);
1026 pipespace(wpipe, desiredsize);
1027 PIPE_LOCK(wpipe);
1028 }
1029 if (wpipe->pipe_buffer.size == 0) {
1030 /*
1031 * This can only happen for reverse direction use of pipes
1032 * in a complete OOM situation.
1033 */
1034 error = ENOMEM;
1035 --wpipe->pipe_busy;
1036 pipeunlock(wpipe);
1037 PIPE_UNLOCK(wpipe);
1038 return (error);
1039 }
1040
1041 pipeunlock(wpipe);
1042
1043 orig_resid = uio->uio_resid;
1044
1045 while (uio->uio_resid) {
1046 int space;
1047
1048 pipelock(wpipe, 0);
1049 if (wpipe->pipe_state & PIPE_EOF) {
1050 pipeunlock(wpipe);
1051 error = EPIPE;
1052 break;
1053 }
1054 #ifndef PIPE_NODIRECT
1055 /*
1056 * If the transfer is large, we can gain performance if
1057 * we do process-to-process copies directly.
1058 * If the write is non-blocking, we don't use the
1059 * direct write mechanism.
1060 *
1061 * The direct write mechanism will detect the reader going
1062 * away on us.
1063 */
1064 if (uio->uio_segflg == UIO_USERSPACE &&
1065 uio->uio_iov->iov_len >= PIPE_MINDIRECT &&
1066 wpipe->pipe_buffer.size >= PIPE_MINDIRECT &&
1067 (fp->f_flag & FNONBLOCK) == 0) {
1068 pipeunlock(wpipe);
1069 error = pipe_direct_write(wpipe, uio);
1070 if (error)
1071 break;
1072 continue;
1073 }
1074 #endif
1075
1076 /*
1077 * Pipe buffered writes cannot coincide with
1078 * direct writes. We wait until the currently executing
1079 * direct write is completed before we start filling the
1080 * pipe buffer. We break out if a signal occurs or the
1081 * reader goes away.
1082 */
1083 if (wpipe->pipe_state & PIPE_DIRECTW) {
1084 if (wpipe->pipe_state & PIPE_WANTR) {
1085 wpipe->pipe_state &= ~PIPE_WANTR;
1086 wakeup(wpipe);
1087 }
1088 pipeselwakeup(wpipe);
1089 wpipe->pipe_state |= PIPE_WANTW;
1090 pipeunlock(wpipe);
1091 error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
1092 "pipbww", 0);
1093 if (error)
1094 break;
1095 else
1096 continue;
1097 }
1098
1099 space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1100
1101 /* Writes of size <= PIPE_BUF must be atomic. */
1102 if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
1103 space = 0;
1104
1105 if (space > 0) {
1106 int size; /* Transfer size */
1107 int segsize; /* first segment to transfer */
1108
1109 /*
1110 * Transfer size is minimum of uio transfer
1111 * and free space in pipe buffer.
1112 */
1113 if (space > uio->uio_resid)
1114 size = uio->uio_resid;
1115 else
1116 size = space;
1117 /*
1118 * First segment to transfer is minimum of
1119 * transfer size and contiguous space in
1120 * pipe buffer. If first segment to transfer
1121 * is less than the transfer size, we've got
1122 * a wraparound in the buffer.
1123 */
1124 segsize = wpipe->pipe_buffer.size -
1125 wpipe->pipe_buffer.in;
1126 if (segsize > size)
1127 segsize = size;
1128
1129 /* Transfer first segment */
1130
1131 PIPE_UNLOCK(rpipe);
1132 error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
1133 segsize, uio);
1134 PIPE_LOCK(rpipe);
1135
1136 if (error == 0 && segsize < size) {
1137 KASSERT(wpipe->pipe_buffer.in + segsize ==
1138 wpipe->pipe_buffer.size,
1139 ("Pipe buffer wraparound disappeared"));
1140 /*
1141 * Transfer remaining part now, to
1142 * support atomic writes. Wraparound
1143 * happened.
1144 */
1145
1146 PIPE_UNLOCK(rpipe);
1147 error = uiomove(
1148 &wpipe->pipe_buffer.buffer[0],
1149 size - segsize, uio);
1150 PIPE_LOCK(rpipe);
1151 }
1152 if (error == 0) {
1153 wpipe->pipe_buffer.in += size;
1154 if (wpipe->pipe_buffer.in >=
1155 wpipe->pipe_buffer.size) {
1156 KASSERT(wpipe->pipe_buffer.in ==
1157 size - segsize +
1158 wpipe->pipe_buffer.size,
1159 ("Expected wraparound bad"));
1160 wpipe->pipe_buffer.in = size - segsize;
1161 }
1162
1163 wpipe->pipe_buffer.cnt += size;
1164 KASSERT(wpipe->pipe_buffer.cnt <=
1165 wpipe->pipe_buffer.size,
1166 ("Pipe buffer overflow"));
1167 }
1168 pipeunlock(wpipe);
1169 if (error != 0)
1170 break;
1171 } else {
1172 /*
1173 * If the "read-side" has been blocked, wake it up now.
1174 */
1175 if (wpipe->pipe_state & PIPE_WANTR) {
1176 wpipe->pipe_state &= ~PIPE_WANTR;
1177 wakeup(wpipe);
1178 }
1179
1180 /*
1181 * don't block on non-blocking I/O
1182 */
1183 if (fp->f_flag & FNONBLOCK) {
1184 error = EAGAIN;
1185 pipeunlock(wpipe);
1186 break;
1187 }
1188
1189 /*
1190 * We have no more space and have something to offer,
1191 * wake up select/poll.
1192 */
1193 pipeselwakeup(wpipe);
1194
1195 wpipe->pipe_state |= PIPE_WANTW;
1196 pipeunlock(wpipe);
1197 error = msleep(wpipe, PIPE_MTX(rpipe),
1198 PRIBIO | PCATCH, "pipewr", 0);
1199 if (error != 0)
1200 break;
1201 }
1202 }
1203
1204 pipelock(wpipe, 0);
1205 --wpipe->pipe_busy;
1206
1207 if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
1208 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
1209 wakeup(wpipe);
1210 } else if (wpipe->pipe_buffer.cnt > 0) {
1211 /*
1212 * If we have put any characters in the buffer, we wake up
1213 * the reader.
1214 */
1215 if (wpipe->pipe_state & PIPE_WANTR) {
1216 wpipe->pipe_state &= ~PIPE_WANTR;
1217 wakeup(wpipe);
1218 }
1219 }
1220
1221 /*
1222 * Don't return EPIPE if I/O was successful
1223 */
1224 if ((wpipe->pipe_buffer.cnt == 0) &&
1225 (uio->uio_resid == 0) &&
1226 (error == EPIPE)) {
1227 error = 0;
1228 }
1229
1230 if (error == 0)
1231 vfs_timestamp(&wpipe->pipe_mtime);
1232
1233 /*
1234 * We have something to offer,
1235 * wake up select/poll.
1236 */
1237 if (wpipe->pipe_buffer.cnt)
1238 pipeselwakeup(wpipe);
1239
1240 pipeunlock(wpipe);
1241 PIPE_UNLOCK(rpipe);
1242 return (error);
1243 }
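/*
 * The "space = 0" test above is what makes writes of size <= PIPE_BUF
 * atomic.  A minimal userland check (guarded out, assuming only
 * standard interfaces): with O_NONBLOCK set, each PIPE_BUF-sized write
 * either transfers completely or fails with EAGAIN; it is never split.
 */
#if 0
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	int fd[2];
	char blob[PIPE_BUF];
	ssize_t n;

	if (pipe(fd) == -1)
		err(1, "pipe");
	fcntl(fd[1], F_SETFL, O_NONBLOCK);
	memset(blob, 'x', sizeof(blob));
	for (;;) {
		n = write(fd[1], blob, sizeof(blob));
		if (n == -1 && errno == EAGAIN) {
			printf("pipe full; no partial write observed\n");
			break;
		}
		if (n != sizeof(blob))		/* must not happen */
			errx(1, "partial write: %zd", n);
	}
	return (0);
}
#endif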
1244
1245 /*
1246 * we implement a very minimal set of ioctls for compatibility with sockets.
1247 */
1248 static int
1249 pipe_ioctl(fp, cmd, data, active_cred, td)
1250 struct file *fp;
1251 u_long cmd;
1252 void *data;
1253 struct ucred *active_cred;
1254 struct thread *td;
1255 {
1256 struct pipe *mpipe = fp->f_data;
1257 int error;
1258
1259 PIPE_LOCK(mpipe);
1260
1261 #ifdef MAC
1262 error = mac_check_pipe_ioctl(active_cred, mpipe->pipe_pair, cmd, data);
1263 if (error) {
1264 PIPE_UNLOCK(mpipe);
1265 return (error);
1266 }
1267 #endif
1268
1269 error = 0;
1270 switch (cmd) {
1271
1272 case FIONBIO:
1273 break;
1274
1275 case FIOASYNC:
1276 if (*(int *)data) {
1277 mpipe->pipe_state |= PIPE_ASYNC;
1278 } else {
1279 mpipe->pipe_state &= ~PIPE_ASYNC;
1280 }
1281 break;
1282
1283 case FIONREAD:
1284 if (mpipe->pipe_state & PIPE_DIRECTW)
1285 *(int *)data = mpipe->pipe_map.cnt;
1286 else
1287 *(int *)data = mpipe->pipe_buffer.cnt;
1288 break;
1289
1290 case FIOSETOWN:
1291 PIPE_UNLOCK(mpipe);
1292 error = fsetown(*(int *)data, &mpipe->pipe_sigio);
1293 goto out_unlocked;
1294
1295 case FIOGETOWN:
1296 *(int *)data = fgetown(&mpipe->pipe_sigio);
1297 break;
1298
1299 /* This is deprecated, FIOSETOWN should be used instead. */
1300 case TIOCSPGRP:
1301 PIPE_UNLOCK(mpipe);
1302 error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
1303 goto out_unlocked;
1304
1305 /* This is deprecated, FIOGETOWN should be used instead. */
1306 case TIOCGPGRP:
1307 *(int *)data = -fgetown(&mpipe->pipe_sigio);
1308 break;
1309
1310 default:
1311 error = ENOTTY;
1312 break;
1313 }
1314 PIPE_UNLOCK(mpipe);
1315 out_unlocked:
1316 return (error);
1317 }
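/*
 * A minimal userland sketch of the FIONREAD case above (guarded out):
 * it reports the bytes currently readable, whether they sit in the
 * kernel buffer or in a pending direct write.
 */
#if 0
#include <sys/ioctl.h>
#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd[2], nready;

	if (pipe(fd) == -1)
		err(1, "pipe");
	write(fd[1], "abc", 3);
	if (ioctl(fd[0], FIONREAD, &nready) == -1)
		err(1, "ioctl");
	printf("FIONREAD: %d bytes pending\n", nready);	/* prints 3 */
	return (0);
}
#endif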
1318
1319 static int
1320 pipe_poll(fp, events, active_cred, td)
1321 struct file *fp;
1322 int events;
1323 struct ucred *active_cred;
1324 struct thread *td;
1325 {
1326 struct pipe *rpipe = fp->f_data;
1327 struct pipe *wpipe;
1328 int revents = 0;
1329 #ifdef MAC
1330 int error;
1331 #endif
1332
1333 wpipe = rpipe->pipe_peer;
1334 PIPE_LOCK(rpipe);
1335 #ifdef MAC
1336 error = mac_check_pipe_poll(active_cred, rpipe->pipe_pair);
1337 if (error)
1338 goto locked_error;
1339 #endif
1340 if (events & (POLLIN | POLLRDNORM))
1341 if ((rpipe->pipe_state & PIPE_DIRECTW) ||
1342 (rpipe->pipe_buffer.cnt > 0) ||
1343 (rpipe->pipe_state & PIPE_EOF))
1344 revents |= events & (POLLIN | POLLRDNORM);
1345
1346 if (events & (POLLOUT | POLLWRNORM))
1347 if (wpipe->pipe_present != PIPE_ACTIVE ||
1348 (wpipe->pipe_state & PIPE_EOF) ||
1349 (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
1350 (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
1351 revents |= events & (POLLOUT | POLLWRNORM);
1352
1353 if ((rpipe->pipe_state & PIPE_EOF) ||
1354 wpipe->pipe_present != PIPE_ACTIVE ||
1355 (wpipe->pipe_state & PIPE_EOF))
1356 revents |= POLLHUP;
1357
1358 if (revents == 0) {
1359 if (events & (POLLIN | POLLRDNORM)) {
1360 selrecord(td, &rpipe->pipe_sel);
1361 rpipe->pipe_state |= PIPE_SEL;
1362 }
1363
1364 if (events & (POLLOUT | POLLWRNORM)) {
1365 selrecord(td, &wpipe->pipe_sel);
1366 wpipe->pipe_state |= PIPE_SEL;
1367 }
1368 }
1369 #ifdef MAC
1370 locked_error:
1371 #endif
1372 PIPE_UNLOCK(rpipe);
1373
1374 return (revents);
1375 }
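/*
 * A minimal userland sketch of pipe_poll() behavior (guarded out,
 * assuming only poll(2)): an empty pipe is immediately writable (at
 * least PIPE_BUF of space) but not readable, and once the writer
 * closes, the read side reports POLLHUP.
 */
#if 0
#include <err.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd[2];
	struct pollfd pfd[2];

	if (pipe(fd) == -1)
		err(1, "pipe");
	pfd[0].fd = fd[0]; pfd[0].events = POLLIN;
	pfd[1].fd = fd[1]; pfd[1].events = POLLOUT;
	if (poll(pfd, 2, 0) == -1)
		err(1, "poll");
	printf("POLLIN on empty pipe: %d, POLLOUT: %d\n",
	    (pfd[0].revents & POLLIN) != 0,
	    (pfd[1].revents & POLLOUT) != 0);
	close(fd[1]);
	poll(pfd, 1, 0);
	printf("after writer close, POLLHUP: %d\n",
	    (pfd[0].revents & POLLHUP) != 0);
	return (0);
}
#endif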
1376
1377 /*
1378 * We shouldn't need locks here as we're doing a read and this should
1379 * be a natural race.
1380 */
1381 static int
1382 pipe_stat(fp, ub, active_cred, td)
1383 struct file *fp;
1384 struct stat *ub;
1385 struct ucred *active_cred;
1386 struct thread *td;
1387 {
1388 struct pipe *pipe = fp->f_data;
1389 #ifdef MAC
1390 int error;
1391
1392 PIPE_LOCK(pipe);
1393 error = mac_check_pipe_stat(active_cred, pipe->pipe_pair);
1394 PIPE_UNLOCK(pipe);
1395 if (error)
1396 return (error);
1397 #endif
1398 bzero(ub, sizeof(*ub));
1399 ub->st_mode = S_IFIFO;
1400 ub->st_blksize = PAGE_SIZE;
1401 if (pipe->pipe_state & PIPE_DIRECTW)
1402 ub->st_size = pipe->pipe_map.cnt;
1403 else
1404 ub->st_size = pipe->pipe_buffer.cnt;
1405 ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
1406 ub->st_atimespec = pipe->pipe_atime;
1407 ub->st_mtimespec = pipe->pipe_mtime;
1408 ub->st_ctimespec = pipe->pipe_ctime;
1409 ub->st_uid = fp->f_cred->cr_uid;
1410 ub->st_gid = fp->f_cred->cr_gid;
1411 /*
1412 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
1413 * XXX (st_dev, st_ino) should be unique.
1414 */
1415 return (0);
1416 }
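/*
 * A minimal userland sketch of the synthetic stat above (guarded out):
 * fstat(2) on a pipe reports S_IFIFO and st_size equal to the bytes
 * currently buffered.
 */
#if 0
#include <sys/stat.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd[2];
	struct stat sb;

	if (pipe(fd) == -1)
		err(1, "pipe");
	write(fd[1], "xyz", 3);
	if (fstat(fd[0], &sb) == -1)
		err(1, "fstat");
	printf("S_ISFIFO: %d, st_size: %jd\n",
	    S_ISFIFO(sb.st_mode) != 0, (intmax_t)sb.st_size);
	return (0);
}
#endif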
1417
1418 /* ARGSUSED */
1419 static int
1420 pipe_close(fp, td)
1421 struct file *fp;
1422 struct thread *td;
1423 {
1424 struct pipe *cpipe = fp->f_data;
1425
1426 fp->f_ops = &badfileops;
1427 fp->f_data = NULL;
1428 funsetown(&cpipe->pipe_sigio);
1429 pipeclose(cpipe);
1430 return (0);
1431 }
1432
1433 static void
1434 pipe_free_kmem(cpipe)
1435 struct pipe *cpipe;
1436 {
1437
1438 KASSERT(!mtx_owned(PIPE_MTX(cpipe)),
1439 ("pipe_free_kmem: pipe mutex locked"));
1440
1441 if (cpipe->pipe_buffer.buffer != NULL) {
1442 atomic_subtract_int(&amountpipekva, cpipe->pipe_buffer.size);
1443 vm_map_remove(pipe_map,
1444 (vm_offset_t)cpipe->pipe_buffer.buffer,
1445 (vm_offset_t)cpipe->pipe_buffer.buffer + cpipe->pipe_buffer.size);
1446 cpipe->pipe_buffer.buffer = NULL;
1447 }
1448 #ifndef PIPE_NODIRECT
1449 {
1450 cpipe->pipe_map.cnt = 0;
1451 cpipe->pipe_map.pos = 0;
1452 cpipe->pipe_map.npages = 0;
1453 }
1454 #endif
1455 }
1456
1457 /*
1458 * shutdown the pipe
1459 */
1460 static void
1461 pipeclose(cpipe)
1462 struct pipe *cpipe;
1463 {
1464 struct pipepair *pp;
1465 struct pipe *ppipe;
1466
1467 KASSERT(cpipe != NULL, ("pipeclose: cpipe == NULL"));
1468
1469 PIPE_LOCK(cpipe);
1470 pipelock(cpipe, 0);
1471 pp = cpipe->pipe_pair;
1472
1473 pipeselwakeup(cpipe);
1474
1475 /*
1476 * If the other side is blocked, wake it up saying that
1477 * we want to close it down.
1478 */
1479 cpipe->pipe_state |= PIPE_EOF;
1480 while (cpipe->pipe_busy) {
1481 wakeup(cpipe);
1482 cpipe->pipe_state |= PIPE_WANT;
1483 pipeunlock(cpipe);
1484 msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
1485 pipelock(cpipe, 0);
1486 }
1487
1488
1489 /*
1490 * Disconnect from peer, if any.
1491 */
1492 ppipe = cpipe->pipe_peer;
1493 if (ppipe->pipe_present == PIPE_ACTIVE) {
1494 pipeselwakeup(ppipe);
1495
1496 ppipe->pipe_state |= PIPE_EOF;
1497 wakeup(ppipe);
1498 KNOTE_LOCKED(&ppipe->pipe_sel.si_note, 0);
1499 }
1500
1501 /*
1502 * Mark this endpoint as free. Release kmem resources. We
1503 * don't mark this endpoint as unused until we've finished
1504 * doing that, or the pipe might disappear out from under
1505 * us.
1506 */
1507 PIPE_UNLOCK(cpipe);
1508 pipe_free_kmem(cpipe);
1509 PIPE_LOCK(cpipe);
1510 cpipe->pipe_present = PIPE_CLOSING;
1511 pipeunlock(cpipe);
1512
1513 /*
1514 * knlist_clear() may sleep, dropping the PIPE_MTX. Set
1515 * PIPE_FINALIZED, which allows the other end to free the
1516 * pipe_pair, only after the knotes are completely dismantled.
1517 */
1518 knlist_clear(&cpipe->pipe_sel.si_note, 1);
1519 cpipe->pipe_present = PIPE_FINALIZED;
1520 knlist_destroy(&cpipe->pipe_sel.si_note);
1521
1522 /*
1523 * If both endpoints are now closed, release the memory for the
1524 * pipe pair. If not, unlock.
1525 */
1526 if (ppipe->pipe_present == PIPE_FINALIZED) {
1527 PIPE_UNLOCK(cpipe);
1528 #ifdef MAC
1529 mac_destroy_pipe(pp);
1530 #endif
1531 uma_zfree(pipe_zone, cpipe->pipe_pair);
1532 } else
1533 PIPE_UNLOCK(cpipe);
1534 }
1535
1536 /*ARGSUSED*/
1537 static int
1538 pipe_kqfilter(struct file *fp, struct knote *kn)
1539 {
1540 struct pipe *cpipe;
1541
1542 cpipe = kn->kn_fp->f_data;
1543 PIPE_LOCK(cpipe);
1544 switch (kn->kn_filter) {
1545 case EVFILT_READ:
1546 kn->kn_fop = &pipe_rfiltops;
1547 break;
1548 case EVFILT_WRITE:
1549 kn->kn_fop = &pipe_wfiltops;
1550 if (cpipe->pipe_peer->pipe_present != PIPE_ACTIVE) {
1551 /* other end of pipe has been closed */
1552 PIPE_UNLOCK(cpipe);
1553 return (EPIPE);
1554 }
1555 cpipe = cpipe->pipe_peer;
1556 break;
1557 default:
1558 PIPE_UNLOCK(cpipe);
1559 return (EINVAL);
1560 }
1561
1562 knlist_add(&cpipe->pipe_sel.si_note, kn, 1);
1563 PIPE_UNLOCK(cpipe);
1564 return (0);
1565 }
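/*
 * A minimal userland sketch of the kqueue path above (guarded out,
 * assuming only kqueue(2)/kevent(2)): registering EVFILT_READ goes
 * through pipe_kqfilter(), and the returned kev.data is the readable
 * byte count computed by filt_piperead().
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd[2], kq;
	struct kevent kev;
	struct timespec ts = { 0, 0 };

	if (pipe(fd) == -1)
		err(1, "pipe");
	if ((kq = kqueue()) == -1)
		err(1, "kqueue");
	EV_SET(&kev, fd[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent register");
	write(fd[1], "hi", 2);
	if (kevent(kq, NULL, 0, &kev, 1, &ts) == 1)
		printf("EVFILT_READ: %jd bytes readable\n",
		    (intmax_t)kev.data);
	return (0);
}
#endif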
1566
1567 static void
1568 filt_pipedetach(struct knote *kn)
1569 {
1570 struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;
1571
1572 PIPE_LOCK(cpipe);
1573 if (kn->kn_filter == EVFILT_WRITE)
1574 cpipe = cpipe->pipe_peer;
1575 knlist_remove(&cpipe->pipe_sel.si_note, kn, 1);
1576 PIPE_UNLOCK(cpipe);
1577 }
1578
1579 /*ARGSUSED*/
1580 static int
1581 filt_piperead(struct knote *kn, long hint)
1582 {
1583 struct pipe *rpipe = kn->kn_fp->f_data;
1584 struct pipe *wpipe = rpipe->pipe_peer;
1585 int ret;
1586
1587 PIPE_LOCK(rpipe);
1588 kn->kn_data = rpipe->pipe_buffer.cnt;
1589 if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
1590 kn->kn_data = rpipe->pipe_map.cnt;
1591
1592 if ((rpipe->pipe_state & PIPE_EOF) ||
1593 wpipe->pipe_present != PIPE_ACTIVE ||
1594 (wpipe->pipe_state & PIPE_EOF)) {
1595 kn->kn_flags |= EV_EOF;
1596 PIPE_UNLOCK(rpipe);
1597 return (1);
1598 }
1599 ret = kn->kn_data > 0;
1600 PIPE_UNLOCK(rpipe);
1601 return (ret);
1602 }
1603
1604 /*ARGSUSED*/
1605 static int
1606 filt_pipewrite(struct knote *kn, long hint)
1607 {
1608 struct pipe *rpipe = kn->kn_fp->f_data;
1609 struct pipe *wpipe = rpipe->pipe_peer;
1610
1611 PIPE_LOCK(rpipe);
1612 if (wpipe->pipe_present != PIPE_ACTIVE ||
1613 (wpipe->pipe_state & PIPE_EOF)) {
1614 kn->kn_data = 0;
1615 kn->kn_flags |= EV_EOF;
1616 PIPE_UNLOCK(rpipe);
1617 return (1);
1618 }
1619 kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1620 if (wpipe->pipe_state & PIPE_DIRECTW)
1621 kn->kn_data = 0;
1622
1623 PIPE_UNLOCK(rpipe);
1624 return (kn->kn_data >= PIPE_BUF);
1625 }