sys/kern/sys_pipe.c
1 /*-
2 * Copyright (c) 1996 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice immediately at the beginning of the file, without modification,
10 * this list of conditions, and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Absolutely no warranty of function or purpose is made by the author
15 * John S. Dyson.
16 * 4. Modifications may be freely made to this file if the above conditions
17 * are met.
18 */
19
20 /*
21 * This file contains a high-performance replacement for the socket-based
22 * pipes scheme originally used in FreeBSD/4.4Lite. It does not support
23 * all features of sockets, but does do everything that pipes normally
24 * do.
25 */
26
27 /*
28 * This code has two modes of operation, a small write mode and a large
29 * write mode. The small write mode acts like conventional pipes with
30 * a kernel buffer. If the buffer is less than PIPE_MINDIRECT, then the
31 * "normal" pipe buffering is done. If the buffer is between PIPE_MINDIRECT
32 * and PIPE_SIZE in size, the sending process pins the underlying pages in
33 * memory, and the receiving process copies directly from these pinned pages
34 * in the sending process.
35 *
36 * If the sending process receives a signal, it is possible that it will
37 * go away, and certainly its address space can change, because control
38 * is returned to the user-mode side. In that case, the pipe code
39 * arranges to copy the buffer supplied by the user process, to a pageable
40 * kernel buffer, and the receiving process will grab the data from the
41 * pageable kernel buffer. Since signals don't happen all that often,
42 * the copy operation is normally eliminated.
43 *
44 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
45 * happen for small transfers so that the system will not spend all of
46 * its time context switching.
47 *
48 * In order to limit the resource use of pipes, two sysctls exist:
49 *
50 * kern.ipc.maxpipekva - This is a hard limit on the amount of pageable
51 * address space available to us in pipe_map. This value is normally
52 * autotuned, but may also be loader tuned.
53 *
54 * kern.ipc.pipekva - This read-only sysctl tracks the current amount of
55 * memory in use by pipes.
56 *
57 * Based on how large pipekva is relative to maxpipekva, the following
58 * will happen:
59 *
60 * 0% - 50%:
61 * New pipes are given 16K of memory backing, pipes may dynamically
62 * grow to as large as 64K where needed.
63 * 50% - 75%:
64 * New pipes are given 4K (or PAGE_SIZE) of memory backing,
65 * existing pipes may NOT grow.
66 * 75% - 100%:
67 * New pipes are given 4K (or PAGE_SIZE) of memory backing,
68 * existing pipes will be shrunk down to 4K whenever possible.
69 *
70 * Resizing may be disabled by setting kern.ipc.piperesizeallowed=0. If
71 * that is set, the only resize that will occur is the 0 -> SMALL_PIPE_SIZE
72 * resize which MUST occur for reverse-direction pipes when they are
73 * first used.
74 *
75 * Additional information about the current state of pipes may be obtained
76 * from kern.ipc.pipes, kern.ipc.pipefragretry, kern.ipc.pipeallocfail,
77 * and kern.ipc.piperesizefail.
78 *
79 * Locking rules: There are two locks present here: A mutex, used via
80 * PIPE_LOCK, and a flag, used via pipelock(). All locking is done via
81 * the flag, as mutexes cannot persist over uiomove. The mutex
82 * exists only to guard access to the flag, and is not in itself a
83 * locking mechanism. Also note that there is only a single mutex for
84 * both directions of a pipe.
85 *
86 * As pipelock() may have to sleep before it can acquire the flag, it
87 * is important to reread all data after a call to pipelock(); everything
88 * in the structure may have changed.
89 */
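
/*
 * Illustrative userland sketch (not part of this file): the sysctls
 * described above can be read with sysctlbyname(3), e.g. to watch how
 * close pipekva is to the maxpipekva limit.  A minimal monitor,
 * assuming a FreeBSD userland, might look like:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		long kva, maxkva;
 *		size_t len;
 *
 *		len = sizeof(kva);
 *		if (sysctlbyname("kern.ipc.pipekva", &kva, &len,
 *		    NULL, 0) == -1)
 *			return (1);
 *		len = sizeof(maxkva);
 *		if (sysctlbyname("kern.ipc.maxpipekva", &maxkva, &len,
 *		    NULL, 0) == -1)
 *			return (1);
 *		printf("pipe KVA: %ld of %ld bytes in use\n", kva, maxkva);
 *		return (0);
 *	}
 */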
90
91 #include <sys/cdefs.h>
92 __FBSDID("$FreeBSD: releng/9.1/sys/kern/sys_pipe.c 233353 2012-03-23 11:26:54Z kib $");
93
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/conf.h>
97 #include <sys/fcntl.h>
98 #include <sys/file.h>
99 #include <sys/filedesc.h>
100 #include <sys/filio.h>
101 #include <sys/kernel.h>
102 #include <sys/lock.h>
103 #include <sys/mutex.h>
104 #include <sys/ttycom.h>
105 #include <sys/stat.h>
106 #include <sys/malloc.h>
107 #include <sys/poll.h>
108 #include <sys/selinfo.h>
109 #include <sys/signalvar.h>
110 #include <sys/syscallsubr.h>
111 #include <sys/sysctl.h>
112 #include <sys/sysproto.h>
113 #include <sys/pipe.h>
114 #include <sys/proc.h>
115 #include <sys/vnode.h>
116 #include <sys/uio.h>
117 #include <sys/event.h>
118
119 #include <security/mac/mac_framework.h>
120
121 #include <vm/vm.h>
122 #include <vm/vm_param.h>
123 #include <vm/vm_object.h>
124 #include <vm/vm_kern.h>
125 #include <vm/vm_extern.h>
126 #include <vm/pmap.h>
127 #include <vm/vm_map.h>
128 #include <vm/vm_page.h>
129 #include <vm/uma.h>
130
131 /*
132 * Use this define if you want to disable *fancy* VM things. Expect an
133 * approx 30% decrease in transfer rate. This could be useful for
134 * NetBSD or OpenBSD.
135 */
136 /* #define PIPE_NODIRECT */
137
138 /*
139 * interfaces to the outside world
140 */
141 static fo_rdwr_t pipe_read;
142 static fo_rdwr_t pipe_write;
143 static fo_truncate_t pipe_truncate;
144 static fo_ioctl_t pipe_ioctl;
145 static fo_poll_t pipe_poll;
146 static fo_kqfilter_t pipe_kqfilter;
147 static fo_stat_t pipe_stat;
148 static fo_close_t pipe_close;
149
150 static struct fileops pipeops = {
151 .fo_read = pipe_read,
152 .fo_write = pipe_write,
153 .fo_truncate = pipe_truncate,
154 .fo_ioctl = pipe_ioctl,
155 .fo_poll = pipe_poll,
156 .fo_kqfilter = pipe_kqfilter,
157 .fo_stat = pipe_stat,
158 .fo_close = pipe_close,
159 .fo_chmod = invfo_chmod,
160 .fo_chown = invfo_chown,
161 .fo_flags = DFLAG_PASSABLE
162 };
163
164 static void filt_pipedetach(struct knote *kn);
165 static int filt_piperead(struct knote *kn, long hint);
166 static int filt_pipewrite(struct knote *kn, long hint);
167
168 static struct filterops pipe_rfiltops = {
169 .f_isfd = 1,
170 .f_detach = filt_pipedetach,
171 .f_event = filt_piperead
172 };
173 static struct filterops pipe_wfiltops = {
174 .f_isfd = 1,
175 .f_detach = filt_pipedetach,
176 .f_event = filt_pipewrite
177 };
178
179 /*
180 * Default pipe buffer size(s); this can be fairly large now because pipe
181 * space is pageable. The pipe code will try to maintain locality of
182 * reference for performance reasons, so small amounts of outstanding I/O
183 * will not wipe the cache.
184 */
185 #define MINPIPESIZE (PIPE_SIZE/3)
186 #define MAXPIPESIZE (2*PIPE_SIZE/3)
187
188 static long amountpipekva;
189 static int pipefragretry;
190 static int pipeallocfail;
191 static int piperesizefail;
192 static int piperesizeallowed = 1;
193
194 SYSCTL_LONG(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RDTUN,
195 &maxpipekva, 0, "Pipe KVA limit");
196 SYSCTL_LONG(_kern_ipc, OID_AUTO, pipekva, CTLFLAG_RD,
197 &amountpipekva, 0, "Pipe KVA usage");
198 SYSCTL_INT(_kern_ipc, OID_AUTO, pipefragretry, CTLFLAG_RD,
199 &pipefragretry, 0, "Pipe allocation retries due to fragmentation");
200 SYSCTL_INT(_kern_ipc, OID_AUTO, pipeallocfail, CTLFLAG_RD,
201 &pipeallocfail, 0, "Pipe allocation failures");
202 SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizefail, CTLFLAG_RD,
203 &piperesizefail, 0, "Pipe resize failures");
204 SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizeallowed, CTLFLAG_RW,
205 &piperesizeallowed, 0, "Pipe resizing allowed");
206
207 static void pipeinit(void *dummy __unused);
208 static void pipeclose(struct pipe *cpipe);
209 static void pipe_free_kmem(struct pipe *cpipe);
210 static int pipe_create(struct pipe *pipe, int backing);
211 static __inline int pipelock(struct pipe *cpipe, int catch);
212 static __inline void pipeunlock(struct pipe *cpipe);
213 static __inline void pipeselwakeup(struct pipe *cpipe);
214 #ifndef PIPE_NODIRECT
215 static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
216 static void pipe_destroy_write_buffer(struct pipe *wpipe);
217 static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
218 static void pipe_clone_write_buffer(struct pipe *wpipe);
219 #endif
220 static int pipespace(struct pipe *cpipe, int size);
221 static int pipespace_new(struct pipe *cpipe, int size);
222
223 static int pipe_zone_ctor(void *mem, int size, void *arg, int flags);
224 static int pipe_zone_init(void *mem, int size, int flags);
225 static void pipe_zone_fini(void *mem, int size);
226
227 static uma_zone_t pipe_zone;
228 static struct unrhdr *pipeino_unr;
229 static dev_t pipedev_ino;
230
231 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);
232
233 static void
234 pipeinit(void *dummy __unused)
235 {
236
237 pipe_zone = uma_zcreate("pipe", sizeof(struct pipepair),
238 pipe_zone_ctor, NULL, pipe_zone_init, pipe_zone_fini,
239 UMA_ALIGN_PTR, 0);
240 KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
241 pipeino_unr = new_unrhdr(1, INT32_MAX, NULL);
242 KASSERT(pipeino_unr != NULL, ("pipe fake inodes not initialized"));
243 pipedev_ino = devfs_alloc_cdp_inode();
244 KASSERT(pipedev_ino > 0, ("pipe dev inode not initialized"));
245 }
246
247 static int
248 pipe_zone_ctor(void *mem, int size, void *arg, int flags)
249 {
250 struct pipepair *pp;
251 struct pipe *rpipe, *wpipe;
252
253 KASSERT(size == sizeof(*pp), ("pipe_zone_ctor: wrong size"));
254
255 pp = (struct pipepair *)mem;
256
257 /*
258 * We zero both pipe endpoints to make sure all the kmem pointers
259 * are NULL, flag fields are zero'd, etc. We timestamp both
260 * endpoints with the same time.
261 */
262 rpipe = &pp->pp_rpipe;
263 bzero(rpipe, sizeof(*rpipe));
264 vfs_timestamp(&rpipe->pipe_ctime);
265 rpipe->pipe_atime = rpipe->pipe_mtime = rpipe->pipe_ctime;
266
267 wpipe = &pp->pp_wpipe;
268 bzero(wpipe, sizeof(*wpipe));
269 wpipe->pipe_ctime = rpipe->pipe_ctime;
270 wpipe->pipe_atime = wpipe->pipe_mtime = rpipe->pipe_ctime;
271
272 rpipe->pipe_peer = wpipe;
273 rpipe->pipe_pair = pp;
274 wpipe->pipe_peer = rpipe;
275 wpipe->pipe_pair = pp;
276
277 /*
278 * Mark both endpoints as present; they will later get free'd
279 * one at a time. When both are free'd, then the whole pair
280 * is released.
281 */
282 rpipe->pipe_present = PIPE_ACTIVE;
283 wpipe->pipe_present = PIPE_ACTIVE;
284
285 /*
286 * Eventually, the MAC Framework may initialize the label
287 * in ctor or init, but for now we do it elsewhere to avoid
288 * blocking in ctor or init.
289 */
290 pp->pp_label = NULL;
291
292 return (0);
293 }
294
295 static int
296 pipe_zone_init(void *mem, int size, int flags)
297 {
298 struct pipepair *pp;
299
300 KASSERT(size == sizeof(*pp), ("pipe_zone_init: wrong size"));
301
302 pp = (struct pipepair *)mem;
303
304 mtx_init(&pp->pp_mtx, "pipe mutex", NULL, MTX_DEF | MTX_RECURSE);
305 return (0);
306 }
307
308 static void
309 pipe_zone_fini(void *mem, int size)
310 {
311 struct pipepair *pp;
312
313 KASSERT(size == sizeof(*pp), ("pipe_zone_fini: wrong size"));
314
315 pp = (struct pipepair *)mem;
316
317 mtx_destroy(&pp->pp_mtx);
318 }
319
320 /*
321 * The pipe system call for the DTYPE_PIPE type of pipes. If we fail, let
322 * the zone pick up the pieces via pipeclose().
323 */
324 int
325 kern_pipe(struct thread *td, int fildes[2])
326 {
327 struct filedesc *fdp = td->td_proc->p_fd;
328 struct file *rf, *wf;
329 struct pipepair *pp;
330 struct pipe *rpipe, *wpipe;
331 int fd, error;
332
333 pp = uma_zalloc(pipe_zone, M_WAITOK);
334 #ifdef MAC
335 /*
336 * The MAC label is shared between the connected endpoints. As a
337 * result mac_pipe_init() and mac_pipe_create() are called once
338 * for the pair, and not on the endpoints.
339 */
340 mac_pipe_init(pp);
341 mac_pipe_create(td->td_ucred, pp);
342 #endif
343 rpipe = &pp->pp_rpipe;
344 wpipe = &pp->pp_wpipe;
345
346 knlist_init_mtx(&rpipe->pipe_sel.si_note, PIPE_MTX(rpipe));
347 knlist_init_mtx(&wpipe->pipe_sel.si_note, PIPE_MTX(wpipe));
348
349 /* Only the forward direction pipe is backed by default */
350 if ((error = pipe_create(rpipe, 1)) != 0 ||
351 (error = pipe_create(wpipe, 0)) != 0) {
352 pipeclose(rpipe);
353 pipeclose(wpipe);
354 return (error);
355 }
356
357 rpipe->pipe_state |= PIPE_DIRECTOK;
358 wpipe->pipe_state |= PIPE_DIRECTOK;
359
360 error = falloc(td, &rf, &fd, 0);
361 if (error) {
362 pipeclose(rpipe);
363 pipeclose(wpipe);
364 return (error);
365 }
366 /* An extra reference on `rf' has been held for us by falloc(). */
367 fildes[0] = fd;
368
369 /*
370 * Warning: once we've gotten past allocation of the fd for the
371 * read-side, we can only drop the read side via fdrop() in order
372 * to avoid races against processes which manage to dup() the read
373 * side while we are blocked trying to allocate the write side.
374 */
375 finit(rf, FREAD | FWRITE, DTYPE_PIPE, rpipe, &pipeops);
376 error = falloc(td, &wf, &fd, 0);
377 if (error) {
378 fdclose(fdp, rf, fildes[0], td);
379 fdrop(rf, td);
380 /* rpipe has been closed by fdrop(). */
381 pipeclose(wpipe);
382 return (error);
383 }
384 /* An extra reference on `wf' has been held for us by falloc(). */
385 finit(wf, FREAD | FWRITE, DTYPE_PIPE, wpipe, &pipeops);
386 fdrop(wf, td);
387 fildes[1] = fd;
388 fdrop(rf, td);
389
390 return (0);
391 }
392
393 /* ARGSUSED */
394 int
395 sys_pipe(struct thread *td, struct pipe_args *uap)
396 {
397 int error;
398 int fildes[2];
399
400 error = kern_pipe(td, fildes);
401 if (error)
402 return (error);
403
404 td->td_retval[0] = fildes[0];
405 td->td_retval[1] = fildes[1];
406
407 return (0);
408 }
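
/*
 * Illustrative userland sketch (not part of this file): typical use of
 * the pipe(2) syscall implemented above.  Closing the unused ends is
 * what lets pipe_read() see EOF and pipe_write() see EPIPE:
 *
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fd[2];
 *		char buf[64];
 *		ssize_t n;
 *
 *		if (pipe(fd) == -1)
 *			return (1);
 *		if (fork() == 0) {
 *			close(fd[1]);
 *			while ((n = read(fd[0], buf, sizeof(buf))) > 0)
 *				write(STDOUT_FILENO, buf, n);
 *			_exit(0);
 *		}
 *		close(fd[0]);
 *		write(fd[1], "hello\n", 6);
 *		close(fd[1]);
 *		wait(NULL);
 *		return (0);
 *	}
 */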
409
410 /*
411 * Allocate kva for pipe circular buffer, the space is pageable
412 * This routine will 'realloc' the size of a pipe safely, if it fails
413 * it will retain the old buffer.
414 * If it fails it will return ENOMEM.
415 */
416 static int
417 pipespace_new(cpipe, size)
418 struct pipe *cpipe;
419 int size;
420 {
421 caddr_t buffer;
422 int error, cnt, firstseg;
423 static int curfail = 0;
424 static struct timeval lastfail;
425
426 KASSERT(!mtx_owned(PIPE_MTX(cpipe)), ("pipespace: pipe mutex locked"));
427 KASSERT(!(cpipe->pipe_state & PIPE_DIRECTW),
428 ("pipespace: resize of direct writes not allowed"));
429 retry:
430 cnt = cpipe->pipe_buffer.cnt;
431 if (cnt > size)
432 size = cnt;
433
434 size = round_page(size);
435 buffer = (caddr_t) vm_map_min(pipe_map);
436
437 error = vm_map_find(pipe_map, NULL, 0,
438 (vm_offset_t *) &buffer, size, 1,
439 VM_PROT_ALL, VM_PROT_ALL, 0);
440 if (error != KERN_SUCCESS) {
441 if ((cpipe->pipe_buffer.buffer == NULL) &&
442 (size > SMALL_PIPE_SIZE)) {
443 size = SMALL_PIPE_SIZE;
444 pipefragretry++;
445 goto retry;
446 }
447 if (cpipe->pipe_buffer.buffer == NULL) {
448 pipeallocfail++;
449 if (ppsratecheck(&lastfail, &curfail, 1))
450 printf("kern.ipc.maxpipekva exceeded; see tuning(7)\n");
451 } else {
452 piperesizefail++;
453 }
454 return (ENOMEM);
455 }
456
457 /* copy data, then free old resources if we're resizing */
458 if (cnt > 0) {
459 if (cpipe->pipe_buffer.in <= cpipe->pipe_buffer.out) {
460 firstseg = cpipe->pipe_buffer.size - cpipe->pipe_buffer.out;
461 bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
462 buffer, firstseg);
463 if ((cnt - firstseg) > 0)
464 bcopy(cpipe->pipe_buffer.buffer, &buffer[firstseg],
465 cpipe->pipe_buffer.in);
466 } else {
467 bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
468 buffer, cnt);
469 }
470 }
471 pipe_free_kmem(cpipe);
472 cpipe->pipe_buffer.buffer = buffer;
473 cpipe->pipe_buffer.size = size;
474 cpipe->pipe_buffer.in = cnt;
475 cpipe->pipe_buffer.out = 0;
476 cpipe->pipe_buffer.cnt = cnt;
477 atomic_add_long(&amountpipekva, cpipe->pipe_buffer.size);
478 return (0);
479 }
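
/*
 * The copy above linearizes a possibly-wrapped circular buffer into
 * the new allocation: when "in" is at or behind "out" (with cnt > 0),
 * the data sits in two segments.  A minimal standalone sketch of that
 * unwrap step (hypothetical helper, not kernel code; assumes cnt > 0):
 *
 *	#include <string.h>
 *
 *	static void
 *	unwrap_copy(char *dst, const char *src, int size, int out,
 *	    int in, int cnt)
 *	{
 *		int firstseg;
 *
 *		if (in <= out) {
 *			firstseg = size - out;
 *			memcpy(dst, src + out, firstseg);
 *			if (cnt - firstseg > 0)
 *				memcpy(dst + firstseg, src, cnt - firstseg);
 *		} else
 *			memcpy(dst, src + out, cnt);
 *	}
 */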
480
481 /*
482 * Wrapper for pipespace_new() that performs locking assertions.
483 */
484 static int
485 pipespace(cpipe, size)
486 struct pipe *cpipe;
487 int size;
488 {
489
490 KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
491 ("Unlocked pipe passed to pipespace"));
492 return (pipespace_new(cpipe, size));
493 }
494
495 /*
496 * lock a pipe for I/O, blocking other access
497 */
498 static __inline int
499 pipelock(cpipe, catch)
500 struct pipe *cpipe;
501 int catch;
502 {
503 int error;
504
505 PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
506 while (cpipe->pipe_state & PIPE_LOCKFL) {
507 cpipe->pipe_state |= PIPE_LWANT;
508 error = msleep(cpipe, PIPE_MTX(cpipe),
509 catch ? (PRIBIO | PCATCH) : PRIBIO,
510 "pipelk", 0);
511 if (error != 0)
512 return (error);
513 }
514 cpipe->pipe_state |= PIPE_LOCKFL;
515 return (0);
516 }
517
518 /*
519 * unlock a pipe I/O lock
520 */
521 static __inline void
522 pipeunlock(cpipe)
523 struct pipe *cpipe;
524 {
525
526 PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
527 KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
528 ("Unlocked pipe passed to pipeunlock"));
529 cpipe->pipe_state &= ~PIPE_LOCKFL;
530 if (cpipe->pipe_state & PIPE_LWANT) {
531 cpipe->pipe_state &= ~PIPE_LWANT;
532 wakeup(cpipe);
533 }
534 }
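
/*
 * Sketch of the canonical caller pattern for this I/O lock, per the
 * locking rules at the top of the file (illustrative pseudo-code, not
 * a real function): pipelock() may sleep, so any pipe state read
 * before the call must be reread afterwards.
 *
 *	PIPE_LOCK(cpipe);
 *	error = pipelock(cpipe, 1);
 *	if (error == 0) {
 *		(reread pipe_state and pipe_buffer here; drop
 *		 PIPE_LOCK around uiomove(), as the I/O flag,
 *		 not the mutex, is what excludes other I/O)
 *		pipeunlock(cpipe);
 *	}
 *	PIPE_UNLOCK(cpipe);
 */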
535
536 static __inline void
537 pipeselwakeup(cpipe)
538 struct pipe *cpipe;
539 {
540
541 PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
542 if (cpipe->pipe_state & PIPE_SEL) {
543 selwakeuppri(&cpipe->pipe_sel, PSOCK);
544 if (!SEL_WAITING(&cpipe->pipe_sel))
545 cpipe->pipe_state &= ~PIPE_SEL;
546 }
547 if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
548 pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
549 KNOTE_LOCKED(&cpipe->pipe_sel.si_note, 0);
550 }
551
552 /*
553 * Initialize and allocate VM and memory for pipe. The structure
554 * will start out zero'd from the ctor, so we just manage the kmem.
555 */
556 static int
557 pipe_create(pipe, backing)
558 struct pipe *pipe;
559 int backing;
560 {
561 int error;
562
563 if (backing) {
564 if (amountpipekva > maxpipekva / 2)
565 error = pipespace_new(pipe, SMALL_PIPE_SIZE);
566 else
567 error = pipespace_new(pipe, PIPE_SIZE);
568 } else {
569 /* If we're not backing this pipe, no need to do anything. */
570 error = 0;
571 }
572 pipe->pipe_ino = -1;
573 return (error);
574 }
575
576 /* ARGSUSED */
577 static int
578 pipe_read(fp, uio, active_cred, flags, td)
579 struct file *fp;
580 struct uio *uio;
581 struct ucred *active_cred;
582 struct thread *td;
583 int flags;
584 {
585 struct pipe *rpipe = fp->f_data;
586 int error;
587 int nread = 0;
588 int size;
589
590 PIPE_LOCK(rpipe);
591 ++rpipe->pipe_busy;
592 error = pipelock(rpipe, 1);
593 if (error)
594 goto unlocked_error;
595
596 #ifdef MAC
597 error = mac_pipe_check_read(active_cred, rpipe->pipe_pair);
598 if (error)
599 goto locked_error;
600 #endif
601 if (amountpipekva > (3 * maxpipekva) / 4) {
602 if (!(rpipe->pipe_state & PIPE_DIRECTW) &&
603 (rpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
604 (rpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
605 (piperesizeallowed == 1)) {
606 PIPE_UNLOCK(rpipe);
607 pipespace(rpipe, SMALL_PIPE_SIZE);
608 PIPE_LOCK(rpipe);
609 }
610 }
611
612 while (uio->uio_resid) {
613 /*
614 * normal pipe buffer receive
615 */
616 if (rpipe->pipe_buffer.cnt > 0) {
617 size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
618 if (size > rpipe->pipe_buffer.cnt)
619 size = rpipe->pipe_buffer.cnt;
620 if (size > uio->uio_resid)
621 size = uio->uio_resid;
622
623 PIPE_UNLOCK(rpipe);
624 error = uiomove(
625 &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
626 size, uio);
627 PIPE_LOCK(rpipe);
628 if (error)
629 break;
630
631 rpipe->pipe_buffer.out += size;
632 if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
633 rpipe->pipe_buffer.out = 0;
634
635 rpipe->pipe_buffer.cnt -= size;
636
637 /*
638 * If there is no more to read in the pipe, reset
639 * its pointers to the beginning. This improves
640 * cache hit stats.
641 */
642 if (rpipe->pipe_buffer.cnt == 0) {
643 rpipe->pipe_buffer.in = 0;
644 rpipe->pipe_buffer.out = 0;
645 }
646 nread += size;
647 #ifndef PIPE_NODIRECT
648 /*
649 * Direct copy, bypassing a kernel buffer.
650 */
651 } else if ((size = rpipe->pipe_map.cnt) &&
652 (rpipe->pipe_state & PIPE_DIRECTW)) {
653 if (size > uio->uio_resid)
654 size = (u_int) uio->uio_resid;
655
656 PIPE_UNLOCK(rpipe);
657 error = uiomove_fromphys(rpipe->pipe_map.ms,
658 rpipe->pipe_map.pos, size, uio);
659 PIPE_LOCK(rpipe);
660 if (error)
661 break;
662 nread += size;
663 rpipe->pipe_map.pos += size;
664 rpipe->pipe_map.cnt -= size;
665 if (rpipe->pipe_map.cnt == 0) {
666 rpipe->pipe_state &= ~PIPE_DIRECTW;
667 wakeup(rpipe);
668 }
669 #endif
670 } else {
671 /*
672 * detect EOF condition
673 * read returns 0 on EOF, no need to set error
674 */
675 if (rpipe->pipe_state & PIPE_EOF)
676 break;
677
678 /*
679 * If the "write-side" has been blocked, wake it up now.
680 */
681 if (rpipe->pipe_state & PIPE_WANTW) {
682 rpipe->pipe_state &= ~PIPE_WANTW;
683 wakeup(rpipe);
684 }
685
686 /*
687 * Break if some data was read.
688 */
689 if (nread > 0)
690 break;
691
692 /*
693 * Unlock the pipe buffer for our remaining processing.
694 * We will either break out with an error or we will
695 * sleep and relock to loop.
696 */
697 pipeunlock(rpipe);
698
699 /*
700 * Handle non-blocking mode operation or
701 * wait for more data.
702 */
703 if (fp->f_flag & FNONBLOCK) {
704 error = EAGAIN;
705 } else {
706 rpipe->pipe_state |= PIPE_WANTR;
707 if ((error = msleep(rpipe, PIPE_MTX(rpipe),
708 PRIBIO | PCATCH,
709 "piperd", 0)) == 0)
710 error = pipelock(rpipe, 1);
711 }
712 if (error)
713 goto unlocked_error;
714 }
715 }
716 #ifdef MAC
717 locked_error:
718 #endif
719 pipeunlock(rpipe);
720
721 /* XXX: should probably do this before getting any locks. */
722 if (error == 0)
723 vfs_timestamp(&rpipe->pipe_atime);
724 unlocked_error:
725 --rpipe->pipe_busy;
726
727 /*
728 * PIPE_WANT processing only makes sense if pipe_busy is 0.
729 */
730 if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
731 rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
732 wakeup(rpipe);
733 } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
734 /*
735 * Handle write blocking hysteresis.
736 */
737 if (rpipe->pipe_state & PIPE_WANTW) {
738 rpipe->pipe_state &= ~PIPE_WANTW;
739 wakeup(rpipe);
740 }
741 }
742
743 if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
744 pipeselwakeup(rpipe);
745
746 PIPE_UNLOCK(rpipe);
747 return (error);
748 }
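
/*
 * Illustrative userland sketch (not part of this file; the helper is
 * hypothetical): how pipe_read()'s returns surface to callers.  A zero
 * return from read(2) is EOF (PIPE_EOF above); EAGAIN is seen only on
 * non-blocking descriptors (FNONBLOCK above):
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	void
 *	drain_once(int fd)
 *	{
 *		char buf[512];
 *		ssize_t n;
 *
 *		fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);
 *		n = read(fd, buf, sizeof(buf));
 *		if (n > 0)
 *			printf("read %zd bytes\n", n);
 *		else if (n == 0)
 *			printf("EOF: write side closed\n");
 *		else if (errno == EAGAIN)
 *			printf("pipe empty right now\n");
 *	}
 */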
749
750 #ifndef PIPE_NODIRECT
751 /*
752 * Map the sending process's buffer into kernel space and wire it.
753 * This is similar to a physical write operation.
754 */
755 static int
756 pipe_build_write_buffer(wpipe, uio)
757 struct pipe *wpipe;
758 struct uio *uio;
759 {
760 u_int size;
761 int i;
762
763 PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
764 KASSERT(wpipe->pipe_state & PIPE_DIRECTW,
765 ("Clone attempt on non-direct write pipe!"));
766
767 if (uio->uio_iov->iov_len > wpipe->pipe_buffer.size)
768 size = wpipe->pipe_buffer.size;
769 else
770 size = uio->uio_iov->iov_len;
771
772 if ((i = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
773 (vm_offset_t)uio->uio_iov->iov_base, size, VM_PROT_READ,
774 wpipe->pipe_map.ms, PIPENPAGES)) < 0)
775 return (EFAULT);
776
777 /*
778 * set up the control block
779 */
780 wpipe->pipe_map.npages = i;
781 wpipe->pipe_map.pos =
782 ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
783 wpipe->pipe_map.cnt = size;
784
785 /*
786 * and update the uio data
787 */
788
789 uio->uio_iov->iov_len -= size;
790 uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
791 if (uio->uio_iov->iov_len == 0)
792 uio->uio_iov++;
793 uio->uio_resid -= size;
794 uio->uio_offset += size;
795 return (0);
796 }
797
798 /*
799 * unmap and unwire the process buffer
800 */
801 static void
802 pipe_destroy_write_buffer(wpipe)
803 struct pipe *wpipe;
804 {
805
806 PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
807 vm_page_unhold_pages(wpipe->pipe_map.ms, wpipe->pipe_map.npages);
808 wpipe->pipe_map.npages = 0;
809 }
810
811 /*
812 * In the case of a signal, the writing process might go away. This
813 * code copies the data into the circular buffer so that the source
814 * pages can be freed without loss of data.
815 */
816 static void
817 pipe_clone_write_buffer(wpipe)
818 struct pipe *wpipe;
819 {
820 struct uio uio;
821 struct iovec iov;
822 int size;
823 int pos;
824
825 PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
826 size = wpipe->pipe_map.cnt;
827 pos = wpipe->pipe_map.pos;
828
829 wpipe->pipe_buffer.in = size;
830 wpipe->pipe_buffer.out = 0;
831 wpipe->pipe_buffer.cnt = size;
832 wpipe->pipe_state &= ~PIPE_DIRECTW;
833
834 PIPE_UNLOCK(wpipe);
835 iov.iov_base = wpipe->pipe_buffer.buffer;
836 iov.iov_len = size;
837 uio.uio_iov = &iov;
838 uio.uio_iovcnt = 1;
839 uio.uio_offset = 0;
840 uio.uio_resid = size;
841 uio.uio_segflg = UIO_SYSSPACE;
842 uio.uio_rw = UIO_READ;
843 uio.uio_td = curthread;
844 uiomove_fromphys(wpipe->pipe_map.ms, pos, size, &uio);
845 PIPE_LOCK(wpipe);
846 pipe_destroy_write_buffer(wpipe);
847 }
848
849 /*
850 * This implements the pipe buffer write mechanism. Note that only
851 * a direct write OR a normal pipe write can be pending at any given time.
852 * If there are any characters in the pipe buffer, the direct write will
853 * be deferred until the receiving process grabs all of the bytes from
854 * the pipe buffer. Then the direct mapping write is set-up.
855 */
856 static int
857 pipe_direct_write(wpipe, uio)
858 struct pipe *wpipe;
859 struct uio *uio;
860 {
861 int error;
862
863 retry:
864 PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
865 error = pipelock(wpipe, 1);
866 if (wpipe->pipe_state & PIPE_EOF)
867 error = EPIPE;
868 if (error) {
869 pipeunlock(wpipe);
870 goto error1;
871 }
872 while (wpipe->pipe_state & PIPE_DIRECTW) {
873 if (wpipe->pipe_state & PIPE_WANTR) {
874 wpipe->pipe_state &= ~PIPE_WANTR;
875 wakeup(wpipe);
876 }
877 pipeselwakeup(wpipe);
878 wpipe->pipe_state |= PIPE_WANTW;
879 pipeunlock(wpipe);
880 error = msleep(wpipe, PIPE_MTX(wpipe),
881 PRIBIO | PCATCH, "pipdww", 0);
882 if (error)
883 goto error1;
884 else
885 goto retry;
886 }
887 wpipe->pipe_map.cnt = 0; /* transfer not ready yet */
888 if (wpipe->pipe_buffer.cnt > 0) {
889 if (wpipe->pipe_state & PIPE_WANTR) {
890 wpipe->pipe_state &= ~PIPE_WANTR;
891 wakeup(wpipe);
892 }
893 pipeselwakeup(wpipe);
894 wpipe->pipe_state |= PIPE_WANTW;
895 pipeunlock(wpipe);
896 error = msleep(wpipe, PIPE_MTX(wpipe),
897 PRIBIO | PCATCH, "pipdwc", 0);
898 if (error)
899 goto error1;
900 else
901 goto retry;
902 }
903
904 wpipe->pipe_state |= PIPE_DIRECTW;
905
906 PIPE_UNLOCK(wpipe);
907 error = pipe_build_write_buffer(wpipe, uio);
908 PIPE_LOCK(wpipe);
909 if (error) {
910 wpipe->pipe_state &= ~PIPE_DIRECTW;
911 pipeunlock(wpipe);
912 goto error1;
913 }
914
915 error = 0;
916 while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
917 if (wpipe->pipe_state & PIPE_EOF) {
918 pipe_destroy_write_buffer(wpipe);
919 pipeselwakeup(wpipe);
920 pipeunlock(wpipe);
921 error = EPIPE;
922 goto error1;
923 }
924 if (wpipe->pipe_state & PIPE_WANTR) {
925 wpipe->pipe_state &= ~PIPE_WANTR;
926 wakeup(wpipe);
927 }
928 pipeselwakeup(wpipe);
929 pipeunlock(wpipe);
930 error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
931 "pipdwt", 0);
932 pipelock(wpipe, 0);
933 }
934
935 if (wpipe->pipe_state & PIPE_EOF)
936 error = EPIPE;
937 if (wpipe->pipe_state & PIPE_DIRECTW) {
938 /*
939 * this bit of trickery substitutes a kernel buffer for
940 * the process that might be going away.
941 */
942 pipe_clone_write_buffer(wpipe);
943 } else {
944 pipe_destroy_write_buffer(wpipe);
945 }
946 pipeunlock(wpipe);
947 return (error);
948
949 error1:
950 wakeup(wpipe);
951 return (error);
952 }
953 #endif
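
/*
 * Illustrative userland note (not part of this file): no special API
 * is needed to reach the direct-write path above.  Any blocking
 * write(2) from user space whose current iovec is at least
 * PIPE_MINDIRECT bytes (see sys/pipe.h) is eligible, per the checks
 * in pipe_write() below:
 *
 *	#include <unistd.h>
 *
 *	static char big[64 * 1024];	(comfortably >= PIPE_MINDIRECT)
 *
 *	void
 *	send_big(int wfd)
 *	{
 *		(void)write(wfd, big, sizeof(big));
 *	}
 */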
954
955 static int
956 pipe_write(fp, uio, active_cred, flags, td)
957 struct file *fp;
958 struct uio *uio;
959 struct ucred *active_cred;
960 struct thread *td;
961 int flags;
962 {
963 int error = 0;
964 int desiredsize;
965 ssize_t orig_resid;
966 struct pipe *wpipe, *rpipe;
967
968 rpipe = fp->f_data;
969 wpipe = rpipe->pipe_peer;
970
971 PIPE_LOCK(rpipe);
972 error = pipelock(wpipe, 1);
973 if (error) {
974 PIPE_UNLOCK(rpipe);
975 return (error);
976 }
977 /*
978 * detect loss of pipe read side, issue SIGPIPE if lost.
979 */
980 if (wpipe->pipe_present != PIPE_ACTIVE ||
981 (wpipe->pipe_state & PIPE_EOF)) {
982 pipeunlock(wpipe);
983 PIPE_UNLOCK(rpipe);
984 return (EPIPE);
985 }
986 #ifdef MAC
987 error = mac_pipe_check_write(active_cred, wpipe->pipe_pair);
988 if (error) {
989 pipeunlock(wpipe);
990 PIPE_UNLOCK(rpipe);
991 return (error);
992 }
993 #endif
994 ++wpipe->pipe_busy;
995
996 /* Choose a larger size if it's advantageous */
997 desiredsize = max(SMALL_PIPE_SIZE, wpipe->pipe_buffer.size);
998 while (desiredsize < wpipe->pipe_buffer.cnt + uio->uio_resid) {
999 if (piperesizeallowed != 1)
1000 break;
1001 if (amountpipekva > maxpipekva / 2)
1002 break;
1003 if (desiredsize == BIG_PIPE_SIZE)
1004 break;
1005 desiredsize = desiredsize * 2;
1006 }
1007
1008 	/* Choose a smaller size if we're in an OOM situation */
1009 if ((amountpipekva > (3 * maxpipekva) / 4) &&
1010 (wpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
1011 (wpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
1012 (piperesizeallowed == 1))
1013 desiredsize = SMALL_PIPE_SIZE;
1014
1015 /* Resize if the above determined that a new size was necessary */
1016 if ((desiredsize != wpipe->pipe_buffer.size) &&
1017 ((wpipe->pipe_state & PIPE_DIRECTW) == 0)) {
1018 PIPE_UNLOCK(wpipe);
1019 pipespace(wpipe, desiredsize);
1020 PIPE_LOCK(wpipe);
1021 }
1022 if (wpipe->pipe_buffer.size == 0) {
1023 /*
1024 * This can only happen for reverse direction use of pipes
1025 * in a complete OOM situation.
1026 */
1027 error = ENOMEM;
1028 --wpipe->pipe_busy;
1029 pipeunlock(wpipe);
1030 PIPE_UNLOCK(wpipe);
1031 return (error);
1032 }
1033
1034 pipeunlock(wpipe);
1035
1036 orig_resid = uio->uio_resid;
1037
1038 while (uio->uio_resid) {
1039 int space;
1040
1041 pipelock(wpipe, 0);
1042 if (wpipe->pipe_state & PIPE_EOF) {
1043 pipeunlock(wpipe);
1044 error = EPIPE;
1045 break;
1046 }
1047 #ifndef PIPE_NODIRECT
1048 /*
1049 * If the transfer is large, we can gain performance if
1050 * we do process-to-process copies directly.
1051 * If the write is non-blocking, we don't use the
1052 * direct write mechanism.
1053 *
1054 * The direct write mechanism will detect the reader going
1055 * away on us.
1056 */
1057 if (uio->uio_segflg == UIO_USERSPACE &&
1058 uio->uio_iov->iov_len >= PIPE_MINDIRECT &&
1059 wpipe->pipe_buffer.size >= PIPE_MINDIRECT &&
1060 (fp->f_flag & FNONBLOCK) == 0) {
1061 pipeunlock(wpipe);
1062 error = pipe_direct_write(wpipe, uio);
1063 if (error)
1064 break;
1065 continue;
1066 }
1067 #endif
1068
1069 /*
1070 		 * Pipe buffered writes cannot be coincident with
1071 * direct writes. We wait until the currently executing
1072 * direct write is completed before we start filling the
1073 * pipe buffer. We break out if a signal occurs or the
1074 * reader goes away.
1075 */
1076 if (wpipe->pipe_state & PIPE_DIRECTW) {
1077 if (wpipe->pipe_state & PIPE_WANTR) {
1078 wpipe->pipe_state &= ~PIPE_WANTR;
1079 wakeup(wpipe);
1080 }
1081 pipeselwakeup(wpipe);
1082 wpipe->pipe_state |= PIPE_WANTW;
1083 pipeunlock(wpipe);
1084 error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
1085 "pipbww", 0);
1086 if (error)
1087 break;
1088 else
1089 continue;
1090 }
1091
1092 space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1093
1094 /* Writes of size <= PIPE_BUF must be atomic. */
1095 if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
1096 space = 0;
1097
1098 if (space > 0) {
1099 int size; /* Transfer size */
1100 int segsize; /* first segment to transfer */
1101
1102 /*
1103 * Transfer size is minimum of uio transfer
1104 * and free space in pipe buffer.
1105 */
1106 if (space > uio->uio_resid)
1107 size = uio->uio_resid;
1108 else
1109 size = space;
1110 /*
1111 * First segment to transfer is minimum of
1112 * transfer size and contiguous space in
1113 * pipe buffer. If first segment to transfer
1114 * is less than the transfer size, we've got
1115 * a wraparound in the buffer.
1116 */
1117 segsize = wpipe->pipe_buffer.size -
1118 wpipe->pipe_buffer.in;
1119 if (segsize > size)
1120 segsize = size;
1121
1122 /* Transfer first segment */
1123
1124 PIPE_UNLOCK(rpipe);
1125 error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
1126 segsize, uio);
1127 PIPE_LOCK(rpipe);
1128
1129 if (error == 0 && segsize < size) {
1130 KASSERT(wpipe->pipe_buffer.in + segsize ==
1131 wpipe->pipe_buffer.size,
1132 ("Pipe buffer wraparound disappeared"));
1133 /*
1134 * Transfer remaining part now, to
1135 * support atomic writes. Wraparound
1136 * happened.
1137 */
1138
1139 PIPE_UNLOCK(rpipe);
1140 error = uiomove(
1141 &wpipe->pipe_buffer.buffer[0],
1142 size - segsize, uio);
1143 PIPE_LOCK(rpipe);
1144 }
1145 if (error == 0) {
1146 wpipe->pipe_buffer.in += size;
1147 if (wpipe->pipe_buffer.in >=
1148 wpipe->pipe_buffer.size) {
1149 KASSERT(wpipe->pipe_buffer.in ==
1150 size - segsize +
1151 wpipe->pipe_buffer.size,
1152 ("Expected wraparound bad"));
1153 wpipe->pipe_buffer.in = size - segsize;
1154 }
1155
1156 wpipe->pipe_buffer.cnt += size;
1157 KASSERT(wpipe->pipe_buffer.cnt <=
1158 wpipe->pipe_buffer.size,
1159 ("Pipe buffer overflow"));
1160 }
1161 pipeunlock(wpipe);
1162 if (error != 0)
1163 break;
1164 } else {
1165 /*
1166 * If the "read-side" has been blocked, wake it up now.
1167 */
1168 if (wpipe->pipe_state & PIPE_WANTR) {
1169 wpipe->pipe_state &= ~PIPE_WANTR;
1170 wakeup(wpipe);
1171 }
1172
1173 /*
1174 * don't block on non-blocking I/O
1175 */
1176 if (fp->f_flag & FNONBLOCK) {
1177 error = EAGAIN;
1178 pipeunlock(wpipe);
1179 break;
1180 }
1181
1182 /*
1183 * We have no more space and have something to offer,
1184 * wake up select/poll.
1185 */
1186 pipeselwakeup(wpipe);
1187
1188 wpipe->pipe_state |= PIPE_WANTW;
1189 pipeunlock(wpipe);
1190 error = msleep(wpipe, PIPE_MTX(rpipe),
1191 PRIBIO | PCATCH, "pipewr", 0);
1192 if (error != 0)
1193 break;
1194 }
1195 }
1196
1197 pipelock(wpipe, 0);
1198 --wpipe->pipe_busy;
1199
1200 if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
1201 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
1202 wakeup(wpipe);
1203 } else if (wpipe->pipe_buffer.cnt > 0) {
1204 /*
1205 * If we have put any characters in the buffer, we wake up
1206 * the reader.
1207 */
1208 if (wpipe->pipe_state & PIPE_WANTR) {
1209 wpipe->pipe_state &= ~PIPE_WANTR;
1210 wakeup(wpipe);
1211 }
1212 }
1213
1214 /*
1215 * Don't return EPIPE if I/O was successful
1216 */
1217 if ((wpipe->pipe_buffer.cnt == 0) &&
1218 (uio->uio_resid == 0) &&
1219 (error == EPIPE)) {
1220 error = 0;
1221 }
1222
1223 if (error == 0)
1224 vfs_timestamp(&wpipe->pipe_mtime);
1225
1226 /*
1227 * We have something to offer,
1228 * wake up select/poll.
1229 */
1230 if (wpipe->pipe_buffer.cnt)
1231 pipeselwakeup(wpipe);
1232
1233 pipeunlock(wpipe);
1234 PIPE_UNLOCK(rpipe);
1235 return (error);
1236 }
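
/*
 * Illustrative userland sketch (not part of this file; the helper is
 * hypothetical): two visible consequences of pipe_write() above.
 * Writes of at most PIPE_BUF bytes are atomic (the "space = 0" logic
 * forces them to wait for contiguous room), and a write after the
 * read side is gone returns EPIPE here, which the generic write path
 * delivers as SIGPIPE unless the signal is ignored:
 *
 *	#include <errno.h>
 *	#include <limits.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	void
 *	log_line(int wfd, const char *line, size_t len)
 *	{
 *		signal(SIGPIPE, SIG_IGN);
 *		if (len <= PIPE_BUF &&
 *		    write(wfd, line, len) == -1 && errno == EPIPE)
 *			fprintf(stderr, "reader went away\n");
 *	}
 */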
1237
1238 /* ARGSUSED */
1239 static int
1240 pipe_truncate(fp, length, active_cred, td)
1241 struct file *fp;
1242 off_t length;
1243 struct ucred *active_cred;
1244 struct thread *td;
1245 {
1246
1247 return (EINVAL);
1248 }
1249
1250 /*
1251 * we implement a very minimal set of ioctls for compatibility with sockets.
1252 */
1253 static int
1254 pipe_ioctl(fp, cmd, data, active_cred, td)
1255 struct file *fp;
1256 u_long cmd;
1257 void *data;
1258 struct ucred *active_cred;
1259 struct thread *td;
1260 {
1261 struct pipe *mpipe = fp->f_data;
1262 int error;
1263
1264 PIPE_LOCK(mpipe);
1265
1266 #ifdef MAC
1267 error = mac_pipe_check_ioctl(active_cred, mpipe->pipe_pair, cmd, data);
1268 if (error) {
1269 PIPE_UNLOCK(mpipe);
1270 return (error);
1271 }
1272 #endif
1273
1274 error = 0;
1275 switch (cmd) {
1276
1277 case FIONBIO:
1278 break;
1279
1280 case FIOASYNC:
1281 if (*(int *)data) {
1282 mpipe->pipe_state |= PIPE_ASYNC;
1283 } else {
1284 mpipe->pipe_state &= ~PIPE_ASYNC;
1285 }
1286 break;
1287
1288 case FIONREAD:
1289 if (mpipe->pipe_state & PIPE_DIRECTW)
1290 *(int *)data = mpipe->pipe_map.cnt;
1291 else
1292 *(int *)data = mpipe->pipe_buffer.cnt;
1293 break;
1294
1295 case FIOSETOWN:
1296 PIPE_UNLOCK(mpipe);
1297 error = fsetown(*(int *)data, &mpipe->pipe_sigio);
1298 goto out_unlocked;
1299
1300 case FIOGETOWN:
1301 *(int *)data = fgetown(&mpipe->pipe_sigio);
1302 break;
1303
1304 	/* This is deprecated; FIOSETOWN should be used instead. */
1305 case TIOCSPGRP:
1306 PIPE_UNLOCK(mpipe);
1307 error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
1308 goto out_unlocked;
1309
1310 	/* This is deprecated; FIOGETOWN should be used instead. */
1311 case TIOCGPGRP:
1312 *(int *)data = -fgetown(&mpipe->pipe_sigio);
1313 break;
1314
1315 default:
1316 error = ENOTTY;
1317 break;
1318 }
1319 PIPE_UNLOCK(mpipe);
1320 out_unlocked:
1321 return (error);
1322 }
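
/*
 * Illustrative userland sketch (not part of this file; the helper is
 * hypothetical): FIONREAD as handled above reports the bytes
 * currently readable: pipe_map.cnt while a direct write is pending,
 * otherwise pipe_buffer.cnt:
 *
 *	#include <sys/ioctl.h>
 *
 *	int
 *	bytes_pending(int fd)
 *	{
 *		int n;
 *
 *		if (ioctl(fd, FIONREAD, &n) == -1)
 *			return (-1);
 *		return (n);
 *	}
 */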
1323
1324 static int
1325 pipe_poll(fp, events, active_cred, td)
1326 struct file *fp;
1327 int events;
1328 struct ucred *active_cred;
1329 struct thread *td;
1330 {
1331 struct pipe *rpipe = fp->f_data;
1332 struct pipe *wpipe;
1333 int revents = 0;
1334 #ifdef MAC
1335 int error;
1336 #endif
1337
1338 wpipe = rpipe->pipe_peer;
1339 PIPE_LOCK(rpipe);
1340 #ifdef MAC
1341 error = mac_pipe_check_poll(active_cred, rpipe->pipe_pair);
1342 if (error)
1343 goto locked_error;
1344 #endif
1345 if (events & (POLLIN | POLLRDNORM))
1346 if ((rpipe->pipe_state & PIPE_DIRECTW) ||
1347 (rpipe->pipe_buffer.cnt > 0))
1348 revents |= events & (POLLIN | POLLRDNORM);
1349
1350 if (events & (POLLOUT | POLLWRNORM))
1351 if (wpipe->pipe_present != PIPE_ACTIVE ||
1352 (wpipe->pipe_state & PIPE_EOF) ||
1353 (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
1354 ((wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF ||
1355 wpipe->pipe_buffer.size == 0)))
1356 revents |= events & (POLLOUT | POLLWRNORM);
1357
1358 if ((events & POLLINIGNEOF) == 0) {
1359 if (rpipe->pipe_state & PIPE_EOF) {
1360 revents |= (events & (POLLIN | POLLRDNORM));
1361 if (wpipe->pipe_present != PIPE_ACTIVE ||
1362 (wpipe->pipe_state & PIPE_EOF))
1363 revents |= POLLHUP;
1364 }
1365 }
1366
1367 if (revents == 0) {
1368 if (events & (POLLIN | POLLRDNORM)) {
1369 selrecord(td, &rpipe->pipe_sel);
1370 if (SEL_WAITING(&rpipe->pipe_sel))
1371 rpipe->pipe_state |= PIPE_SEL;
1372 }
1373
1374 if (events & (POLLOUT | POLLWRNORM)) {
1375 selrecord(td, &wpipe->pipe_sel);
1376 if (SEL_WAITING(&wpipe->pipe_sel))
1377 wpipe->pipe_state |= PIPE_SEL;
1378 }
1379 }
1380 #ifdef MAC
1381 locked_error:
1382 #endif
1383 PIPE_UNLOCK(rpipe);
1384
1385 return (revents);
1386 }
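
/*
 * Illustrative userland sketch (not part of this file; the helper is
 * hypothetical): the events computed above, seen through poll(2).
 * POLLHUP on the read side means the write side is gone; broadly,
 * writability is reported once at least PIPE_BUF bytes would fit:
 *
 *	#include <poll.h>
 *
 *	int
 *	wait_readable(int rfd, int timeout_ms)
 *	{
 *		struct pollfd pfd;
 *
 *		pfd.fd = rfd;
 *		pfd.events = POLLIN;
 *		if (poll(&pfd, 1, timeout_ms) <= 0)
 *			return (0);
 *		if (pfd.revents & POLLHUP)
 *			return (-1);
 *		return ((pfd.revents & POLLIN) != 0);
 *	}
 */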
1387
1388 /*
1389 * We shouldn't need locks here as we're doing a read and this should
1390 * be a natural race.
1391 */
1392 static int
1393 pipe_stat(fp, ub, active_cred, td)
1394 struct file *fp;
1395 struct stat *ub;
1396 struct ucred *active_cred;
1397 struct thread *td;
1398 {
1399 struct pipe *pipe;
1400 int new_unr;
1401 #ifdef MAC
1402 int error;
1403 #endif
1404
1405 pipe = fp->f_data;
1406 PIPE_LOCK(pipe);
1407 #ifdef MAC
1408 error = mac_pipe_check_stat(active_cred, pipe->pipe_pair);
1409 if (error) {
1410 PIPE_UNLOCK(pipe);
1411 return (error);
1412 }
1413 #endif
1414 /*
1415 * Lazily allocate an inode number for the pipe. Most pipe
1416 * users do not call fstat(2) on the pipe, which means that
1417 	 * postponing the inode allocation until it must be
1418 * returned to userland is useful. If alloc_unr failed,
1419 * assign st_ino zero instead of returning an error.
1420 * Special pipe_ino values:
1421 * -1 - not yet initialized;
1422 * 0 - alloc_unr failed, return 0 as st_ino forever.
1423 */
1424 if (pipe->pipe_ino == (ino_t)-1) {
1425 new_unr = alloc_unr(pipeino_unr);
1426 if (new_unr != -1)
1427 pipe->pipe_ino = new_unr;
1428 else
1429 pipe->pipe_ino = 0;
1430 }
1431 PIPE_UNLOCK(pipe);
1432
1433 bzero(ub, sizeof(*ub));
1434 ub->st_mode = S_IFIFO;
1435 ub->st_blksize = PAGE_SIZE;
1436 if (pipe->pipe_state & PIPE_DIRECTW)
1437 ub->st_size = pipe->pipe_map.cnt;
1438 else
1439 ub->st_size = pipe->pipe_buffer.cnt;
1440 ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
1441 ub->st_atim = pipe->pipe_atime;
1442 ub->st_mtim = pipe->pipe_mtime;
1443 ub->st_ctim = pipe->pipe_ctime;
1444 ub->st_uid = fp->f_cred->cr_uid;
1445 ub->st_gid = fp->f_cred->cr_gid;
1446 ub->st_dev = pipedev_ino;
1447 ub->st_ino = pipe->pipe_ino;
1448 /*
1449 * Left as 0: st_nlink, st_rdev, st_flags, st_gen.
1450 */
1451 return (0);
1452 }
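
/*
 * Illustrative userland sketch (not part of this file; the helper is
 * hypothetical): as filled in above, fstat(2) on a pipe reports
 * S_IFIFO and uses st_size for the bytes currently in the pipe:
 *
 *	#include <sys/stat.h>
 *
 *	long
 *	pipe_backlog(int fd)
 *	{
 *		struct stat sb;
 *
 *		if (fstat(fd, &sb) == -1 || !S_ISFIFO(sb.st_mode))
 *			return (-1);
 *		return ((long)sb.st_size);
 *	}
 */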
1453
1454 /* ARGSUSED */
1455 static int
1456 pipe_close(fp, td)
1457 struct file *fp;
1458 struct thread *td;
1459 {
1460 struct pipe *cpipe = fp->f_data;
1461
1462 fp->f_ops = &badfileops;
1463 fp->f_data = NULL;
1464 funsetown(&cpipe->pipe_sigio);
1465 pipeclose(cpipe);
1466 return (0);
1467 }
1468
1469 static void
1470 pipe_free_kmem(cpipe)
1471 struct pipe *cpipe;
1472 {
1473
1474 KASSERT(!mtx_owned(PIPE_MTX(cpipe)),
1475 ("pipe_free_kmem: pipe mutex locked"));
1476
1477 if (cpipe->pipe_buffer.buffer != NULL) {
1478 atomic_subtract_long(&amountpipekva, cpipe->pipe_buffer.size);
1479 vm_map_remove(pipe_map,
1480 (vm_offset_t)cpipe->pipe_buffer.buffer,
1481 (vm_offset_t)cpipe->pipe_buffer.buffer + cpipe->pipe_buffer.size);
1482 cpipe->pipe_buffer.buffer = NULL;
1483 }
1484 #ifndef PIPE_NODIRECT
1485 {
1486 cpipe->pipe_map.cnt = 0;
1487 cpipe->pipe_map.pos = 0;
1488 cpipe->pipe_map.npages = 0;
1489 }
1490 #endif
1491 }
1492
1493 /*
1494 * shutdown the pipe
1495 */
1496 static void
1497 pipeclose(cpipe)
1498 struct pipe *cpipe;
1499 {
1500 struct pipepair *pp;
1501 struct pipe *ppipe;
1502 ino_t ino;
1503
1504 KASSERT(cpipe != NULL, ("pipeclose: cpipe == NULL"));
1505
1506 PIPE_LOCK(cpipe);
1507 pipelock(cpipe, 0);
1508 pp = cpipe->pipe_pair;
1509
1510 pipeselwakeup(cpipe);
1511
1512 /*
1513 * If the other side is blocked, wake it up saying that
1514 * we want to close it down.
1515 */
1516 cpipe->pipe_state |= PIPE_EOF;
1517 while (cpipe->pipe_busy) {
1518 wakeup(cpipe);
1519 cpipe->pipe_state |= PIPE_WANT;
1520 pipeunlock(cpipe);
1521 msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
1522 pipelock(cpipe, 0);
1523 }
1524
1525
1526 /*
1527 * Disconnect from peer, if any.
1528 */
1529 ppipe = cpipe->pipe_peer;
1530 if (ppipe->pipe_present == PIPE_ACTIVE) {
1531 pipeselwakeup(ppipe);
1532
1533 ppipe->pipe_state |= PIPE_EOF;
1534 wakeup(ppipe);
1535 KNOTE_LOCKED(&ppipe->pipe_sel.si_note, 0);
1536 }
1537
1538 /*
1539 * Mark this endpoint as free. Release kmem resources. We
1540 * don't mark this endpoint as unused until we've finished
1541 * doing that, or the pipe might disappear out from under
1542 * us.
1543 */
1544 PIPE_UNLOCK(cpipe);
1545 pipe_free_kmem(cpipe);
1546 PIPE_LOCK(cpipe);
1547 cpipe->pipe_present = PIPE_CLOSING;
1548 pipeunlock(cpipe);
1549
1550 /*
1551 	 * knlist_clear() may sleep, dropping the PIPE_MTX.  Set
1552 	 * PIPE_FINALIZED, which allows the other end to free the
1553 	 * pipe_pair, only after the knotes are completely dismantled.
1554 */
1555 knlist_clear(&cpipe->pipe_sel.si_note, 1);
1556 cpipe->pipe_present = PIPE_FINALIZED;
1557 seldrain(&cpipe->pipe_sel);
1558 knlist_destroy(&cpipe->pipe_sel.si_note);
1559
1560 /*
1561 * Postpone the destroy of the fake inode number allocated for
1562 * our end, until pipe mtx is unlocked.
1563 */
1564 ino = cpipe->pipe_ino;
1565
1566 /*
1567 * If both endpoints are now closed, release the memory for the
1568 * pipe pair. If not, unlock.
1569 */
1570 if (ppipe->pipe_present == PIPE_FINALIZED) {
1571 PIPE_UNLOCK(cpipe);
1572 #ifdef MAC
1573 mac_pipe_destroy(pp);
1574 #endif
1575 uma_zfree(pipe_zone, cpipe->pipe_pair);
1576 } else
1577 PIPE_UNLOCK(cpipe);
1578
1579 if (ino != 0 && ino != (ino_t)-1)
1580 free_unr(pipeino_unr, ino);
1581 }
1582
1583 /*ARGSUSED*/
1584 static int
1585 pipe_kqfilter(struct file *fp, struct knote *kn)
1586 {
1587 struct pipe *cpipe;
1588
1589 cpipe = kn->kn_fp->f_data;
1590 PIPE_LOCK(cpipe);
1591 switch (kn->kn_filter) {
1592 case EVFILT_READ:
1593 kn->kn_fop = &pipe_rfiltops;
1594 break;
1595 case EVFILT_WRITE:
1596 kn->kn_fop = &pipe_wfiltops;
1597 if (cpipe->pipe_peer->pipe_present != PIPE_ACTIVE) {
1598 /* other end of pipe has been closed */
1599 PIPE_UNLOCK(cpipe);
1600 return (EPIPE);
1601 }
1602 cpipe = cpipe->pipe_peer;
1603 break;
1604 default:
1605 PIPE_UNLOCK(cpipe);
1606 return (EINVAL);
1607 }
1608
1609 knlist_add(&cpipe->pipe_sel.si_note, kn, 1);
1610 PIPE_UNLOCK(cpipe);
1611 return (0);
1612 }
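
/*
 * Illustrative userland sketch (not part of this file; the helper is
 * hypothetical): registering the filters above through kqueue(2).
 * For EVFILT_READ, ev.data carries the byte count computed in
 * filt_piperead() and EV_EOF is set once the peer is gone:
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *
 *	int
 *	watch_pipe(int rfd)
 *	{
 *		struct kevent kev;
 *		int kq;
 *
 *		if ((kq = kqueue()) == -1)
 *			return (-1);
 *		EV_SET(&kev, rfd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *		if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *			return (-1);
 *		return (kq);
 *	}
 */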
1613
1614 static void
1615 filt_pipedetach(struct knote *kn)
1616 {
1617 struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;
1618
1619 PIPE_LOCK(cpipe);
1620 if (kn->kn_filter == EVFILT_WRITE)
1621 cpipe = cpipe->pipe_peer;
1622 knlist_remove(&cpipe->pipe_sel.si_note, kn, 1);
1623 PIPE_UNLOCK(cpipe);
1624 }
1625
1626 /*ARGSUSED*/
1627 static int
1628 filt_piperead(struct knote *kn, long hint)
1629 {
1630 struct pipe *rpipe = kn->kn_fp->f_data;
1631 struct pipe *wpipe = rpipe->pipe_peer;
1632 int ret;
1633
1634 PIPE_LOCK(rpipe);
1635 kn->kn_data = rpipe->pipe_buffer.cnt;
1636 if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
1637 kn->kn_data = rpipe->pipe_map.cnt;
1638
1639 if ((rpipe->pipe_state & PIPE_EOF) ||
1640 wpipe->pipe_present != PIPE_ACTIVE ||
1641 (wpipe->pipe_state & PIPE_EOF)) {
1642 kn->kn_flags |= EV_EOF;
1643 PIPE_UNLOCK(rpipe);
1644 return (1);
1645 }
1646 ret = kn->kn_data > 0;
1647 PIPE_UNLOCK(rpipe);
1648 	return (ret);
1649 }
1650
1651 /*ARGSUSED*/
1652 static int
1653 filt_pipewrite(struct knote *kn, long hint)
1654 {
1655 struct pipe *rpipe = kn->kn_fp->f_data;
1656 struct pipe *wpipe = rpipe->pipe_peer;
1657
1658 PIPE_LOCK(rpipe);
1659 if (wpipe->pipe_present != PIPE_ACTIVE ||
1660 (wpipe->pipe_state & PIPE_EOF)) {
1661 kn->kn_data = 0;
1662 kn->kn_flags |= EV_EOF;
1663 PIPE_UNLOCK(rpipe);
1664 return (1);
1665 }
1666 kn->kn_data = (wpipe->pipe_buffer.size > 0) ?
1667 (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) : PIPE_BUF;
1668 if (wpipe->pipe_state & PIPE_DIRECTW)
1669 kn->kn_data = 0;
1670
1671 PIPE_UNLOCK(rpipe);
1672 return (kn->kn_data >= PIPE_BUF);
1673 }