FreeBSD/Linux Kernel Cross Reference
sys/kern/sys_pipe.c
1 /*-
2 * Copyright (c) 1996 John S. Dyson
3 * Copyright (c) 2012 Giovanni Trematerra
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice immediately at the beginning of the file, without modification,
11 * this list of conditions, and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Absolutely no warranty of function or purpose is made by the author
16 * John S. Dyson.
17 * 4. Modifications may be freely made to this file if the above conditions
18 * are met.
19 */
20
21 /*
22 * This file contains a high-performance replacement for the socket-based
23 * pipes scheme originally used in FreeBSD/4.4Lite. It does not support
24 * all features of sockets, but does do everything that pipes normally
25 * do.
26 */
27
28 /*
29 * This code has two modes of operation, a small write mode and a large
30 * write mode. The small write mode acts like conventional pipes with
31 * a kernel buffer. If the write is smaller than PIPE_MINDIRECT, then the
32 * "normal" pipe buffering is done. If the write is between PIPE_MINDIRECT
33 * and PIPE_SIZE in size, the sending process pins the underlying pages in
34 * memory, and the receiving process copies directly from these pinned pages
35 * in the sending process.
36 *
37 * If the sending process receives a signal, it is possible that it will
38 * go away, and certainly its address space can change, because control
39 * is returned back to the user-mode side. In that case, the pipe code
40 * arranges to copy the buffer supplied by the user process, to a pageable
41 * kernel buffer, and the receiving process will grab the data from the
42 * pageable kernel buffer. Since signals don't happen all that often,
43 * the copy operation is normally eliminated.
44 *
45 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
46 * happen for small transfers so that the system will not spend all of
47 * its time context switching.
48 *
49 * In order to limit the resource use of pipes, two sysctls exist:
50 *
51 * kern.ipc.maxpipekva - This is a hard limit on the amount of pageable
52 * address space available to us in pipe_map. This value is normally
53 * autotuned, but may also be loader tuned.
54 *
55 * kern.ipc.pipekva - This read-only sysctl tracks the current amount of
56 * memory in use by pipes.
57 *
58 * Based on how large pipekva is relative to maxpipekva, the following
59 * will happen:
60 *
61 * 0% - 50%:
62 * New pipes are given 16K of memory backing, pipes may dynamically
63 * grow to as large as 64K where needed.
64 * 50% - 75%:
65 * New pipes are given 4K (or PAGE_SIZE) of memory backing,
66 * existing pipes may NOT grow.
67 * 75% - 100%:
68 * New pipes are given 4K (or PAGE_SIZE) of memory backing,
69 * existing pipes will be shrunk down to 4K whenever possible.
70 *
71 * Resizing may be disabled by setting kern.ipc.piperesizeallowed=0. If
72 * that is set, the only resize that will occur is the 0 -> SMALL_PIPE_SIZE
73 * resize which MUST occur for reverse-direction pipes when they are
74 * first used.
75 *
76 * Additional information about the current state of pipes may be obtained
77 * from kern.ipc.pipes, kern.ipc.pipefragretry, kern.ipc.pipeallocfail,
78 * and kern.ipc.piperesizefail.
79 *
80 * Locking rules: There are two locks present here: A mutex, used via
81 * PIPE_LOCK, and a flag, used via pipelock(). All locking is done via
82 * the flag, as mutexes cannot persist over uiomove. The mutex
83 * exists only to guard access to the flag, and is not in itself a
84 * locking mechanism. Also note that there is only a single mutex for
85 * both directions of a pipe.
86 *
87 * As pipelock() may have to sleep before it can acquire the flag, it
88 * is important to reread all data after a call to pipelock(); everything
89 * in the structure may have changed.
90 */
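/*
 * Illustrative example (editor's sketch, not part of this file): reading
 * the two sysctls described above from userland via sysctlbyname(3).
 * The sysctl names and their long type come from this file; everything
 * else (program structure, output format) is an assumption.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	long maxkva, kva;
	size_t len;

	len = sizeof(maxkva);
	if (sysctlbyname("kern.ipc.maxpipekva", &maxkva, &len, NULL, 0) == -1)
		return (1);
	len = sizeof(kva);
	if (sysctlbyname("kern.ipc.pipekva", &kva, &len, NULL, 0) == -1)
		return (1);
	/* Past 50% usage, new pipes are created with small backing. */
	printf("pipe KVA: %ld of %ld bytes in use\n", kva, maxkva);
	return (0);
}
#endif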
91
92 #include <sys/cdefs.h>
93 __FBSDID("$FreeBSD$");
94
95 #include <sys/param.h>
96 #include <sys/systm.h>
97 #include <sys/conf.h>
98 #include <sys/fcntl.h>
99 #include <sys/file.h>
100 #include <sys/filedesc.h>
101 #include <sys/filio.h>
102 #include <sys/kernel.h>
103 #include <sys/lock.h>
104 #include <sys/mutex.h>
105 #include <sys/ttycom.h>
106 #include <sys/stat.h>
107 #include <sys/malloc.h>
108 #include <sys/poll.h>
109 #include <sys/selinfo.h>
110 #include <sys/signalvar.h>
111 #include <sys/syscallsubr.h>
112 #include <sys/sysctl.h>
113 #include <sys/sysproto.h>
114 #include <sys/pipe.h>
115 #include <sys/proc.h>
116 #include <sys/vnode.h>
117 #include <sys/uio.h>
118 #include <sys/event.h>
119
120 #include <security/mac/mac_framework.h>
121
122 #include <vm/vm.h>
123 #include <vm/vm_param.h>
124 #include <vm/vm_object.h>
125 #include <vm/vm_kern.h>
126 #include <vm/vm_extern.h>
127 #include <vm/pmap.h>
128 #include <vm/vm_map.h>
129 #include <vm/vm_page.h>
130 #include <vm/uma.h>
131
132 /*
133 * Use this define if you want to disable *fancy* VM things. Expect an
134 * approx 30% decrease in transfer rate. This could be useful for
135 * NetBSD or OpenBSD.
136 */
137 /* #define PIPE_NODIRECT */
138
139 #define PIPE_PEER(pipe) \
140 (((pipe)->pipe_state & PIPE_NAMED) ? (pipe) : ((pipe)->pipe_peer))
141
142 /*
143 * interfaces to the outside world
144 */
145 static fo_rdwr_t pipe_read;
146 static fo_rdwr_t pipe_write;
147 static fo_truncate_t pipe_truncate;
148 static fo_ioctl_t pipe_ioctl;
149 static fo_poll_t pipe_poll;
150 static fo_kqfilter_t pipe_kqfilter;
151 static fo_stat_t pipe_stat;
152 static fo_close_t pipe_close;
153 static fo_chmod_t pipe_chmod;
154 static fo_chown_t pipe_chown;
155
156 struct fileops pipeops = {
157 .fo_read = pipe_read,
158 .fo_write = pipe_write,
159 .fo_truncate = pipe_truncate,
160 .fo_ioctl = pipe_ioctl,
161 .fo_poll = pipe_poll,
162 .fo_kqfilter = pipe_kqfilter,
163 .fo_stat = pipe_stat,
164 .fo_close = pipe_close,
165 .fo_chmod = pipe_chmod,
166 .fo_chown = pipe_chown,
167 .fo_sendfile = invfo_sendfile,
168 .fo_flags = DFLAG_PASSABLE
169 };
170
171 static void filt_pipedetach(struct knote *kn);
172 static void filt_pipedetach_notsup(struct knote *kn);
173 static int filt_pipenotsup(struct knote *kn, long hint);
174 static int filt_piperead(struct knote *kn, long hint);
175 static int filt_pipewrite(struct knote *kn, long hint);
176
177 static struct filterops pipe_nfiltops = {
178 .f_isfd = 1,
179 .f_detach = filt_pipedetach_notsup,
180 .f_event = filt_pipenotsup
181 };
182 static struct filterops pipe_rfiltops = {
183 .f_isfd = 1,
184 .f_detach = filt_pipedetach,
185 .f_event = filt_piperead
186 };
187 static struct filterops pipe_wfiltops = {
188 .f_isfd = 1,
189 .f_detach = filt_pipedetach,
190 .f_event = filt_pipewrite
191 };
192
193 /*
194 * Default pipe buffer size(s); these can be fairly large now because pipe
195 * space is pageable. The pipe code will try to maintain locality of
196 * reference for performance reasons, so small amounts of outstanding I/O
197 * will not wipe the cache.
198 */
199 #define MINPIPESIZE (PIPE_SIZE/3)
200 #define MAXPIPESIZE (2*PIPE_SIZE/3)
201
202 static long amountpipekva;
203 static int pipefragretry;
204 static int pipeallocfail;
205 static int piperesizefail;
206 static int piperesizeallowed = 1;
207
208 SYSCTL_LONG(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RDTUN,
209 &maxpipekva, 0, "Pipe KVA limit");
210 SYSCTL_LONG(_kern_ipc, OID_AUTO, pipekva, CTLFLAG_RD,
211 &amountpipekva, 0, "Pipe KVA usage");
212 SYSCTL_INT(_kern_ipc, OID_AUTO, pipefragretry, CTLFLAG_RD,
213 &pipefragretry, 0, "Pipe allocation retries due to fragmentation");
214 SYSCTL_INT(_kern_ipc, OID_AUTO, pipeallocfail, CTLFLAG_RD,
215 &pipeallocfail, 0, "Pipe allocation failures");
216 SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizefail, CTLFLAG_RD,
217 &piperesizefail, 0, "Pipe resize failures");
218 SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizeallowed, CTLFLAG_RW,
219 &piperesizeallowed, 0, "Pipe resizing allowed");
220
221 static void pipeinit(void *dummy __unused);
222 static void pipeclose(struct pipe *cpipe);
223 static void pipe_free_kmem(struct pipe *cpipe);
224 static void pipe_create(struct pipe *pipe, int backing);
225 static void pipe_paircreate(struct thread *td, struct pipepair **p_pp);
226 static __inline int pipelock(struct pipe *cpipe, int catch);
227 static __inline void pipeunlock(struct pipe *cpipe);
228 #ifndef PIPE_NODIRECT
229 static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
230 static void pipe_destroy_write_buffer(struct pipe *wpipe);
231 static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
232 static void pipe_clone_write_buffer(struct pipe *wpipe);
233 #endif
234 static int pipespace(struct pipe *cpipe, int size);
235 static int pipespace_new(struct pipe *cpipe, int size);
236
237 static int pipe_zone_ctor(void *mem, int size, void *arg, int flags);
238 static int pipe_zone_init(void *mem, int size, int flags);
239 static void pipe_zone_fini(void *mem, int size);
240
241 static uma_zone_t pipe_zone;
242 static struct unrhdr *pipeino_unr;
243 static dev_t pipedev_ino;
244
245 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);
246
247 static void
248 pipeinit(void *dummy __unused)
249 {
250
251 pipe_zone = uma_zcreate("pipe", sizeof(struct pipepair),
252 pipe_zone_ctor, NULL, pipe_zone_init, pipe_zone_fini,
253 UMA_ALIGN_PTR, 0);
254 KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
255 pipeino_unr = new_unrhdr(1, INT32_MAX, NULL);
256 KASSERT(pipeino_unr != NULL, ("pipe fake inodes not initialized"));
257 pipedev_ino = devfs_alloc_cdp_inode();
258 KASSERT(pipedev_ino > 0, ("pipe dev inode not initialized"));
259 }
260
261 static int
262 pipe_zone_ctor(void *mem, int size, void *arg, int flags)
263 {
264 struct pipepair *pp;
265 struct pipe *rpipe, *wpipe;
266
267 KASSERT(size == sizeof(*pp), ("pipe_zone_ctor: wrong size"));
268
269 pp = (struct pipepair *)mem;
270
271 /*
272 * We zero both pipe endpoints to make sure all the kmem pointers
273 * are NULL, flag fields are zero'd, etc. We timestamp both
274 * endpoints with the same time.
275 */
276 rpipe = &pp->pp_rpipe;
277 bzero(rpipe, sizeof(*rpipe));
278 vfs_timestamp(&rpipe->pipe_ctime);
279 rpipe->pipe_atime = rpipe->pipe_mtime = rpipe->pipe_ctime;
280
281 wpipe = &pp->pp_wpipe;
282 bzero(wpipe, sizeof(*wpipe));
283 wpipe->pipe_ctime = rpipe->pipe_ctime;
284 wpipe->pipe_atime = wpipe->pipe_mtime = rpipe->pipe_ctime;
285
286 rpipe->pipe_peer = wpipe;
287 rpipe->pipe_pair = pp;
288 wpipe->pipe_peer = rpipe;
289 wpipe->pipe_pair = pp;
290
291 /*
292 * Mark both endpoints as present; they will later get free'd
293 * one at a time. When both are free'd, then the whole pair
294 * is released.
295 */
296 rpipe->pipe_present = PIPE_ACTIVE;
297 wpipe->pipe_present = PIPE_ACTIVE;
298
299 /*
300 * Eventually, the MAC Framework may initialize the label
301 * in ctor or init, but for now we do it elsewhere to avoid
302 * blocking in ctor or init.
303 */
304 pp->pp_label = NULL;
305
306 return (0);
307 }
308
309 static int
310 pipe_zone_init(void *mem, int size, int flags)
311 {
312 struct pipepair *pp;
313
314 KASSERT(size == sizeof(*pp), ("pipe_zone_init: wrong size"));
315
316 pp = (struct pipepair *)mem;
317
318 mtx_init(&pp->pp_mtx, "pipe mutex", NULL, MTX_DEF);
319 return (0);
320 }
321
322 static void
323 pipe_zone_fini(void *mem, int size)
324 {
325 struct pipepair *pp;
326
327 KASSERT(size == sizeof(*pp), ("pipe_zone_fini: wrong size"));
328
329 pp = (struct pipepair *)mem;
330
331 mtx_destroy(&pp->pp_mtx);
332 }
333
334 static void
335 pipe_paircreate(struct thread *td, struct pipepair **p_pp)
336 {
337 struct pipepair *pp;
338 struct pipe *rpipe, *wpipe;
339
340 *p_pp = pp = uma_zalloc(pipe_zone, M_WAITOK);
341 #ifdef MAC
342 /*
343 * The MAC label is shared between the connected endpoints. As a
344 * result mac_pipe_init() and mac_pipe_create() are called once
345 * for the pair, and not on the endpoints.
346 */
347 mac_pipe_init(pp);
348 mac_pipe_create(td->td_ucred, pp);
349 #endif
350 rpipe = &pp->pp_rpipe;
351 wpipe = &pp->pp_wpipe;
352
353 knlist_init_mtx(&rpipe->pipe_sel.si_note, PIPE_MTX(rpipe));
354 knlist_init_mtx(&wpipe->pipe_sel.si_note, PIPE_MTX(wpipe));
355
356 /* Only the forward direction pipe is backed by default */
357 pipe_create(rpipe, 1);
358 pipe_create(wpipe, 0);
359
360 rpipe->pipe_state |= PIPE_DIRECTOK;
361 wpipe->pipe_state |= PIPE_DIRECTOK;
362 }
363
364 void
365 pipe_named_ctor(struct pipe **ppipe, struct thread *td)
366 {
367 struct pipepair *pp;
368
369 pipe_paircreate(td, &pp);
370 pp->pp_rpipe.pipe_state |= PIPE_NAMED;
371 *ppipe = &pp->pp_rpipe;
372 }
373
374 void
375 pipe_dtor(struct pipe *dpipe)
376 {
377 struct pipe *peer;
378 ino_t ino;
379
380 ino = dpipe->pipe_ino;
381 peer = (dpipe->pipe_state & PIPE_NAMED) != 0 ? dpipe->pipe_peer : NULL;
382 funsetown(&dpipe->pipe_sigio);
383 pipeclose(dpipe);
384 if (peer != NULL) {
385 funsetown(&peer->pipe_sigio);
386 pipeclose(peer);
387 }
388 if (ino != 0 && ino != (ino_t)-1)
389 free_unr(pipeino_unr, ino);
390 }
391
392 /*
393 * The pipe system call for the DTYPE_PIPE type of pipes. If we fail, let
394 * the zone pick up the pieces via pipeclose().
395 */
396 int
397 kern_pipe(struct thread *td, int fildes[2])
398 {
399
400 return (kern_pipe2(td, fildes, 0));
401 }
402
403 int
404 kern_pipe2(struct thread *td, int fildes[2], int flags)
405 {
406 struct file *rf, *wf;
407 struct pipe *rpipe, *wpipe;
408 struct pipepair *pp;
409 int fd, fflags, error;
410
411 pipe_paircreate(td, &pp);
412 rpipe = &pp->pp_rpipe;
413 wpipe = &pp->pp_wpipe;
414 error = falloc(td, &rf, &fd, flags);
415 if (error) {
416 pipeclose(rpipe);
417 pipeclose(wpipe);
418 return (error);
419 }
420 /* An extra reference on `rf' has been held for us by falloc(). */
421 fildes[0] = fd;
422
423 fflags = FREAD | FWRITE;
424 if ((flags & O_NONBLOCK) != 0)
425 fflags |= FNONBLOCK;
426
427 /*
428 * Warning: once we've gotten past allocation of the fd for the
429 * read-side, we can only drop the read side via fdrop() in order
430 * to avoid races against processes which manage to dup() the read
431 * side while we are blocked trying to allocate the write side.
432 */
433 finit(rf, fflags, DTYPE_PIPE, rpipe, &pipeops);
434 error = falloc(td, &wf, &fd, flags);
435 if (error) {
436 fdclose(td, rf, fildes[0]);
437 fdrop(rf, td);
438 /* rpipe has been closed by fdrop(). */
439 pipeclose(wpipe);
440 return (error);
441 }
442 /* An extra reference on `wf' has been held for us by falloc(). */
443 finit(wf, fflags, DTYPE_PIPE, wpipe, &pipeops);
444 fdrop(wf, td);
445 fildes[1] = fd;
446 fdrop(rf, td);
447
448 return (0);
449 }
450
451 /* ARGSUSED */
452 int
453 sys_pipe(struct thread *td, struct pipe_args *uap)
454 {
455 int error;
456 int fildes[2];
457
458 error = kern_pipe(td, fildes);
459 if (error)
460 return (error);
461
462 td->td_retval[0] = fildes[0];
463 td->td_retval[1] = fildes[1];
464
465 return (0);
466 }
467
468 int
469 sys_pipe2(struct thread *td, struct pipe2_args *uap)
470 {
471 int error, fildes[2];
472
473 if (uap->flags & ~(O_CLOEXEC | O_NONBLOCK))
474 return (EINVAL);
475 error = kern_pipe2(td, fildes, uap->flags);
476 if (error)
477 return (error);
478 error = copyout(fildes, uap->fildes, 2 * sizeof(int));
479 if (error) {
480 (void)kern_close(td, fildes[0]);
481 (void)kern_close(td, fildes[1]);
482 }
483 return (error);
484 }
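/*
 * Illustrative example (editor's sketch, not part of this file): userland
 * use of the pipe2(2) syscall implemented above.  sys_pipe2() rejects any
 * flags other than O_CLOEXEC and O_NONBLOCK with EINVAL.
 */
#if 0
#include <unistd.h>
#include <fcntl.h>

static int
make_cloexec_pipe(int fds[2])
{
	/* Both descriptors are created non-blocking and close-on-exec. */
	return (pipe2(fds, O_CLOEXEC | O_NONBLOCK));
}
#endif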
485
486 /*
487 * Allocate KVA for the pipe's circular buffer; the space is pageable.
488 * This routine will 'realloc' the size of a pipe safely; if it
489 * fails, it will retain the old buffer.
490 * On failure it returns ENOMEM.
491 */
492 static int
493 pipespace_new(cpipe, size)
494 struct pipe *cpipe;
495 int size;
496 {
497 caddr_t buffer;
498 int error, cnt, firstseg;
499 static int curfail = 0;
500 static struct timeval lastfail;
501
502 KASSERT(!mtx_owned(PIPE_MTX(cpipe)), ("pipespace: pipe mutex locked"));
503 KASSERT(!(cpipe->pipe_state & PIPE_DIRECTW),
504 ("pipespace: resize of direct writes not allowed"));
505 retry:
506 cnt = cpipe->pipe_buffer.cnt;
507 if (cnt > size)
508 size = cnt;
509
510 size = round_page(size);
511 buffer = (caddr_t) vm_map_min(pipe_map);
512
513 error = vm_map_find(pipe_map, NULL, 0,
514 (vm_offset_t *) &buffer, size, 0, VMFS_ANY_SPACE,
515 VM_PROT_ALL, VM_PROT_ALL, 0);
516 if (error != KERN_SUCCESS) {
517 if ((cpipe->pipe_buffer.buffer == NULL) &&
518 (size > SMALL_PIPE_SIZE)) {
519 size = SMALL_PIPE_SIZE;
520 pipefragretry++;
521 goto retry;
522 }
523 if (cpipe->pipe_buffer.buffer == NULL) {
524 pipeallocfail++;
525 if (ppsratecheck(&lastfail, &curfail, 1))
526 printf("kern.ipc.maxpipekva exceeded; see tuning(7)\n");
527 } else {
528 piperesizefail++;
529 }
530 return (ENOMEM);
531 }
532
533 /* copy data, then free old resources if we're resizing */
534 if (cnt > 0) {
535 if (cpipe->pipe_buffer.in <= cpipe->pipe_buffer.out) {
536 firstseg = cpipe->pipe_buffer.size - cpipe->pipe_buffer.out;
537 bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
538 buffer, firstseg);
539 if ((cnt - firstseg) > 0)
540 bcopy(cpipe->pipe_buffer.buffer, &buffer[firstseg],
541 cpipe->pipe_buffer.in);
542 } else {
543 bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
544 buffer, cnt);
545 }
546 }
547 pipe_free_kmem(cpipe);
548 cpipe->pipe_buffer.buffer = buffer;
549 cpipe->pipe_buffer.size = size;
550 cpipe->pipe_buffer.in = cnt;
551 cpipe->pipe_buffer.out = 0;
552 cpipe->pipe_buffer.cnt = cnt;
553 atomic_add_long(&amountpipekva, cpipe->pipe_buffer.size);
554 return (0);
555 }
556
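/*
 * Illustrative sketch (editor's addition, not part of this file) of the
 * two-segment copy pipespace_new() performs when the circular buffer has
 * wrapped: live data runs from 'out' to the end of the buffer, then from
 * the start to 'in'.  Field meanings mirror struct pipebuf; the helper
 * and its names are hypothetical.
 */
#if 0
#include <string.h>

static void
copy_out_circular(char *dst, const char *src, int bufsize, int out, int cnt)
{
	int firstseg;

	if (out + cnt <= bufsize) {
		/* No wraparound: one contiguous segment. */
		memcpy(dst, src + out, cnt);
	} else {
		/* Tail segment first, then the wrapped head segment. */
		firstseg = bufsize - out;
		memcpy(dst, src + out, firstseg);
		memcpy(dst + firstseg, src, cnt - firstseg);
	}
}
#endif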
557 /*
558 * Wrapper for pipespace_new() that performs locking assertions.
559 */
560 static int
561 pipespace(cpipe, size)
562 struct pipe *cpipe;
563 int size;
564 {
565
566 KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
567 ("Unlocked pipe passed to pipespace"));
568 return (pipespace_new(cpipe, size));
569 }
570
571 /*
572 * lock a pipe for I/O, blocking other access
573 */
574 static __inline int
575 pipelock(cpipe, catch)
576 struct pipe *cpipe;
577 int catch;
578 {
579 int error;
580
581 PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
582 while (cpipe->pipe_state & PIPE_LOCKFL) {
583 cpipe->pipe_state |= PIPE_LWANT;
584 error = msleep(cpipe, PIPE_MTX(cpipe),
585 catch ? (PRIBIO | PCATCH) : PRIBIO,
586 "pipelk", 0);
587 if (error != 0)
588 return (error);
589 }
590 cpipe->pipe_state |= PIPE_LOCKFL;
591 return (0);
592 }
593
594 /*
595 * unlock a pipe I/O lock
596 */
597 static __inline void
598 pipeunlock(cpipe)
599 struct pipe *cpipe;
600 {
601
602 PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
603 KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
604 ("Unlocked pipe passed to pipeunlock"));
605 cpipe->pipe_state &= ~PIPE_LOCKFL;
606 if (cpipe->pipe_state & PIPE_LWANT) {
607 cpipe->pipe_state &= ~PIPE_LWANT;
608 wakeup(cpipe);
609 }
610 }
611
612 void
613 pipeselwakeup(cpipe)
614 struct pipe *cpipe;
615 {
616
617 PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
618 if (cpipe->pipe_state & PIPE_SEL) {
619 selwakeuppri(&cpipe->pipe_sel, PSOCK);
620 if (!SEL_WAITING(&cpipe->pipe_sel))
621 cpipe->pipe_state &= ~PIPE_SEL;
622 }
623 if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
624 pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
625 KNOTE_LOCKED(&cpipe->pipe_sel.si_note, 0);
626 }
627
628 /*
629 * Initialize and allocate VM and memory for pipe. The structure
630 * will start out zero'd from the ctor, so we just manage the kmem.
631 */
632 static void
633 pipe_create(pipe, backing)
634 struct pipe *pipe;
635 int backing;
636 {
637
638 if (backing) {
639 /*
640 * Note that these functions can fail if pipe map is exhausted
641 * (as a result of too many pipes created), but we ignore the
642 * error as it is not fatal and could be provoked by
643 * unprivileged users. The only consequence is worse performance
644 * with given pipe.
645 */
646 if (amountpipekva > maxpipekva / 2)
647 (void)pipespace_new(pipe, SMALL_PIPE_SIZE);
648 else
649 (void)pipespace_new(pipe, PIPE_SIZE);
650 }
651
652 pipe->pipe_ino = -1;
653 }
654
655 /* ARGSUSED */
656 static int
657 pipe_read(fp, uio, active_cred, flags, td)
658 struct file *fp;
659 struct uio *uio;
660 struct ucred *active_cred;
661 struct thread *td;
662 int flags;
663 {
664 struct pipe *rpipe;
665 int error;
666 int nread = 0;
667 int size;
668
669 rpipe = fp->f_data;
670 PIPE_LOCK(rpipe);
671 ++rpipe->pipe_busy;
672 error = pipelock(rpipe, 1);
673 if (error)
674 goto unlocked_error;
675
676 #ifdef MAC
677 error = mac_pipe_check_read(active_cred, rpipe->pipe_pair);
678 if (error)
679 goto locked_error;
680 #endif
681 if (amountpipekva > (3 * maxpipekva) / 4) {
682 if (!(rpipe->pipe_state & PIPE_DIRECTW) &&
683 (rpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
684 (rpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
685 (piperesizeallowed == 1)) {
686 PIPE_UNLOCK(rpipe);
687 pipespace(rpipe, SMALL_PIPE_SIZE);
688 PIPE_LOCK(rpipe);
689 }
690 }
691
692 while (uio->uio_resid) {
693 /*
694 * normal pipe buffer receive
695 */
696 if (rpipe->pipe_buffer.cnt > 0) {
697 size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
698 if (size > rpipe->pipe_buffer.cnt)
699 size = rpipe->pipe_buffer.cnt;
700 if (size > uio->uio_resid)
701 size = uio->uio_resid;
702
703 PIPE_UNLOCK(rpipe);
704 error = uiomove(
705 &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
706 size, uio);
707 PIPE_LOCK(rpipe);
708 if (error)
709 break;
710
711 rpipe->pipe_buffer.out += size;
712 if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
713 rpipe->pipe_buffer.out = 0;
714
715 rpipe->pipe_buffer.cnt -= size;
716
717 /*
718 * If there is no more to read in the pipe, reset
719 * its pointers to the beginning. This improves
720 * cache hit stats.
721 */
722 if (rpipe->pipe_buffer.cnt == 0) {
723 rpipe->pipe_buffer.in = 0;
724 rpipe->pipe_buffer.out = 0;
725 }
726 nread += size;
727 #ifndef PIPE_NODIRECT
728 /*
729 * Direct copy, bypassing a kernel buffer.
730 */
731 } else if ((size = rpipe->pipe_map.cnt) &&
732 (rpipe->pipe_state & PIPE_DIRECTW)) {
733 if (size > uio->uio_resid)
734 size = (u_int) uio->uio_resid;
735
736 PIPE_UNLOCK(rpipe);
737 error = uiomove_fromphys(rpipe->pipe_map.ms,
738 rpipe->pipe_map.pos, size, uio);
739 PIPE_LOCK(rpipe);
740 if (error)
741 break;
742 nread += size;
743 rpipe->pipe_map.pos += size;
744 rpipe->pipe_map.cnt -= size;
745 if (rpipe->pipe_map.cnt == 0) {
746 rpipe->pipe_state &= ~(PIPE_DIRECTW|PIPE_WANTW);
747 wakeup(rpipe);
748 }
749 #endif
750 } else {
751 /*
752 * detect EOF condition
753 * read returns 0 on EOF, no need to set error
754 */
755 if (rpipe->pipe_state & PIPE_EOF)
756 break;
757
758 /*
759 * If the "write-side" has been blocked, wake it up now.
760 */
761 if (rpipe->pipe_state & PIPE_WANTW) {
762 rpipe->pipe_state &= ~PIPE_WANTW;
763 wakeup(rpipe);
764 }
765
766 /*
767 * Break if some data was read.
768 */
769 if (nread > 0)
770 break;
771
772 /*
773 * Unlock the pipe buffer for our remaining processing.
774 * We will either break out with an error or we will
775 * sleep and relock to loop.
776 */
777 pipeunlock(rpipe);
778
779 /*
780 * Handle non-blocking mode operation or
781 * wait for more data.
782 */
783 if (fp->f_flag & FNONBLOCK) {
784 error = EAGAIN;
785 } else {
786 rpipe->pipe_state |= PIPE_WANTR;
787 if ((error = msleep(rpipe, PIPE_MTX(rpipe),
788 PRIBIO | PCATCH,
789 "piperd", 0)) == 0)
790 error = pipelock(rpipe, 1);
791 }
792 if (error)
793 goto unlocked_error;
794 }
795 }
796 #ifdef MAC
797 locked_error:
798 #endif
799 pipeunlock(rpipe);
800
801 /* XXX: should probably do this before getting any locks. */
802 if (error == 0)
803 vfs_timestamp(&rpipe->pipe_atime);
804 unlocked_error:
805 --rpipe->pipe_busy;
806
807 /*
808 * PIPE_WANT processing only makes sense if pipe_busy is 0.
809 */
810 if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
811 rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
812 wakeup(rpipe);
813 } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
814 /*
815 * Handle write blocking hysteresis.
816 */
817 if (rpipe->pipe_state & PIPE_WANTW) {
818 rpipe->pipe_state &= ~PIPE_WANTW;
819 wakeup(rpipe);
820 }
821 }
822
823 if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
824 pipeselwakeup(rpipe);
825
826 PIPE_UNLOCK(rpipe);
827 return (error);
828 }
829
830 #ifndef PIPE_NODIRECT
831 /*
832 * Map the sending process's buffer into kernel space and wire it.
833 * This is similar to a physical write operation.
834 */
835 static int
836 pipe_build_write_buffer(wpipe, uio)
837 struct pipe *wpipe;
838 struct uio *uio;
839 {
840 u_int size;
841 int i;
842
843 PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
844 KASSERT(wpipe->pipe_state & PIPE_DIRECTW,
845 ("Clone attempt on non-direct write pipe!"));
846
847 if (uio->uio_iov->iov_len > wpipe->pipe_buffer.size)
848 size = wpipe->pipe_buffer.size;
849 else
850 size = uio->uio_iov->iov_len;
851
852 if ((i = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
853 (vm_offset_t)uio->uio_iov->iov_base, size, VM_PROT_READ,
854 wpipe->pipe_map.ms, PIPENPAGES)) < 0)
855 return (EFAULT);
856
857 /*
858 * set up the control block
859 */
860 wpipe->pipe_map.npages = i;
861 wpipe->pipe_map.pos =
862 ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
863 wpipe->pipe_map.cnt = size;
864
865 /*
866 * and update the uio data
867 */
868
869 uio->uio_iov->iov_len -= size;
870 uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
871 if (uio->uio_iov->iov_len == 0)
872 uio->uio_iov++;
873 uio->uio_resid -= size;
874 uio->uio_offset += size;
875 return (0);
876 }
877
878 /*
879 * unmap and unwire the process buffer
880 */
881 static void
882 pipe_destroy_write_buffer(wpipe)
883 struct pipe *wpipe;
884 {
885
886 PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
887 vm_page_unhold_pages(wpipe->pipe_map.ms, wpipe->pipe_map.npages);
888 wpipe->pipe_map.npages = 0;
889 }
890
891 /*
892 * In the case of a signal, the writing process might go away. This
893 * code copies the data into the circular buffer so that the source
894 * pages can be freed without loss of data.
895 */
896 static void
897 pipe_clone_write_buffer(wpipe)
898 struct pipe *wpipe;
899 {
900 struct uio uio;
901 struct iovec iov;
902 int size;
903 int pos;
904
905 PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
906 size = wpipe->pipe_map.cnt;
907 pos = wpipe->pipe_map.pos;
908
909 wpipe->pipe_buffer.in = size;
910 wpipe->pipe_buffer.out = 0;
911 wpipe->pipe_buffer.cnt = size;
912 wpipe->pipe_state &= ~PIPE_DIRECTW;
913
914 PIPE_UNLOCK(wpipe);
915 iov.iov_base = wpipe->pipe_buffer.buffer;
916 iov.iov_len = size;
917 uio.uio_iov = &iov;
918 uio.uio_iovcnt = 1;
919 uio.uio_offset = 0;
920 uio.uio_resid = size;
921 uio.uio_segflg = UIO_SYSSPACE;
922 uio.uio_rw = UIO_READ;
923 uio.uio_td = curthread;
924 uiomove_fromphys(wpipe->pipe_map.ms, pos, size, &uio);
925 PIPE_LOCK(wpipe);
926 pipe_destroy_write_buffer(wpipe);
927 }
928
929 /*
930 * This implements the pipe buffer write mechanism. Note that only
931 * a direct write OR a normal pipe write can be pending at any given time.
932 * If there are any characters in the pipe buffer, the direct write will
933 * be deferred until the receiving process grabs all of the bytes from
934 * the pipe buffer. Then the direct mapping write is set up.
935 */
936 static int
937 pipe_direct_write(wpipe, uio)
938 struct pipe *wpipe;
939 struct uio *uio;
940 {
941 int error;
942
943 retry:
944 PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
945 error = pipelock(wpipe, 1);
946 if (error != 0)
947 goto error1;
948 if ((wpipe->pipe_state & PIPE_EOF) != 0) {
949 error = EPIPE;
950 pipeunlock(wpipe);
951 goto error1;
952 }
953 while (wpipe->pipe_state & PIPE_DIRECTW) {
954 if (wpipe->pipe_state & PIPE_WANTR) {
955 wpipe->pipe_state &= ~PIPE_WANTR;
956 wakeup(wpipe);
957 }
958 pipeselwakeup(wpipe);
959 wpipe->pipe_state |= PIPE_WANTW;
960 pipeunlock(wpipe);
961 error = msleep(wpipe, PIPE_MTX(wpipe),
962 PRIBIO | PCATCH, "pipdww", 0);
963 if (error)
964 goto error1;
965 else
966 goto retry;
967 }
968 wpipe->pipe_map.cnt = 0; /* transfer not ready yet */
969 if (wpipe->pipe_buffer.cnt > 0) {
970 if (wpipe->pipe_state & PIPE_WANTR) {
971 wpipe->pipe_state &= ~PIPE_WANTR;
972 wakeup(wpipe);
973 }
974 pipeselwakeup(wpipe);
975 wpipe->pipe_state |= PIPE_WANTW;
976 pipeunlock(wpipe);
977 error = msleep(wpipe, PIPE_MTX(wpipe),
978 PRIBIO | PCATCH, "pipdwc", 0);
979 if (error)
980 goto error1;
981 else
982 goto retry;
983 }
984
985 wpipe->pipe_state |= PIPE_DIRECTW;
986
987 PIPE_UNLOCK(wpipe);
988 error = pipe_build_write_buffer(wpipe, uio);
989 PIPE_LOCK(wpipe);
990 if (error) {
991 wpipe->pipe_state &= ~PIPE_DIRECTW;
992 pipeunlock(wpipe);
993 goto error1;
994 }
995
996 error = 0;
997 while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
998 if (wpipe->pipe_state & PIPE_EOF) {
999 pipe_destroy_write_buffer(wpipe);
1000 pipeselwakeup(wpipe);
1001 pipeunlock(wpipe);
1002 error = EPIPE;
1003 goto error1;
1004 }
1005 if (wpipe->pipe_state & PIPE_WANTR) {
1006 wpipe->pipe_state &= ~PIPE_WANTR;
1007 wakeup(wpipe);
1008 }
1009 pipeselwakeup(wpipe);
1010 wpipe->pipe_state |= PIPE_WANTW;
1011 pipeunlock(wpipe);
1012 error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
1013 "pipdwt", 0);
1014 pipelock(wpipe, 0);
1015 }
1016
1017 if (wpipe->pipe_state & PIPE_EOF)
1018 error = EPIPE;
1019 if (wpipe->pipe_state & PIPE_DIRECTW) {
1020 /*
1021 * this bit of trickery substitutes a kernel buffer for
1022 * the process that might be going away.
1023 */
1024 pipe_clone_write_buffer(wpipe);
1025 } else {
1026 pipe_destroy_write_buffer(wpipe);
1027 }
1028 pipeunlock(wpipe);
1029 return (error);
1030
1031 error1:
1032 wakeup(wpipe);
1033 return (error);
1034 }
1035 #endif
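/*
 * Illustrative example (editor's sketch, not part of this file): a
 * userland write shaped to be eligible for the direct path above.  Per
 * pipe_write() below, the data must come from user space in a single
 * iovec of at least PIPE_MINDIRECT bytes on a blocking descriptor; the
 * 128k size here is an assumption chosen to exceed PIPE_MINDIRECT.
 */
#if 0
#include <sys/uio.h>
#include <unistd.h>

static ssize_t
big_write(int wfd)
{
	static char buf[128 * 1024];	/* single large iovec */
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };

	/* wfd must not have O_NONBLOCK set, or the buffered path is used. */
	return (writev(wfd, &iov, 1));
}
#endif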
1036
1037 static int
1038 pipe_write(fp, uio, active_cred, flags, td)
1039 struct file *fp;
1040 struct uio *uio;
1041 struct ucred *active_cred;
1042 struct thread *td;
1043 int flags;
1044 {
1045 int error = 0;
1046 int desiredsize;
1047 ssize_t orig_resid;
1048 struct pipe *wpipe, *rpipe;
1049
1050 rpipe = fp->f_data;
1051 wpipe = PIPE_PEER(rpipe);
1052 PIPE_LOCK(rpipe);
1053 error = pipelock(wpipe, 1);
1054 if (error) {
1055 PIPE_UNLOCK(rpipe);
1056 return (error);
1057 }
1058 /*
1059 * detect loss of pipe read side, issue SIGPIPE if lost.
1060 */
1061 if (wpipe->pipe_present != PIPE_ACTIVE ||
1062 (wpipe->pipe_state & PIPE_EOF)) {
1063 pipeunlock(wpipe);
1064 PIPE_UNLOCK(rpipe);
1065 return (EPIPE);
1066 }
1067 #ifdef MAC
1068 error = mac_pipe_check_write(active_cred, wpipe->pipe_pair);
1069 if (error) {
1070 pipeunlock(wpipe);
1071 PIPE_UNLOCK(rpipe);
1072 return (error);
1073 }
1074 #endif
1075 ++wpipe->pipe_busy;
1076
1077 /* Choose a larger size if it's advantageous */
1078 desiredsize = max(SMALL_PIPE_SIZE, wpipe->pipe_buffer.size);
1079 while (desiredsize < wpipe->pipe_buffer.cnt + uio->uio_resid) {
1080 if (piperesizeallowed != 1)
1081 break;
1082 if (amountpipekva > maxpipekva / 2)
1083 break;
1084 if (desiredsize == BIG_PIPE_SIZE)
1085 break;
1086 desiredsize = desiredsize * 2;
1087 }
1088
1089 /* Choose a smaller size if we're in an OOM situation */
1090 if ((amountpipekva > (3 * maxpipekva) / 4) &&
1091 (wpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
1092 (wpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
1093 (piperesizeallowed == 1))
1094 desiredsize = SMALL_PIPE_SIZE;
1095
1096 /* Resize if the above determined that a new size was necessary */
1097 if ((desiredsize != wpipe->pipe_buffer.size) &&
1098 ((wpipe->pipe_state & PIPE_DIRECTW) == 0)) {
1099 PIPE_UNLOCK(wpipe);
1100 pipespace(wpipe, desiredsize);
1101 PIPE_LOCK(wpipe);
1102 }
1103 if (wpipe->pipe_buffer.size == 0) {
1104 /*
1105 * This can only happen for reverse direction use of pipes
1106 * in a complete OOM situation.
1107 */
1108 error = ENOMEM;
1109 --wpipe->pipe_busy;
1110 pipeunlock(wpipe);
1111 PIPE_UNLOCK(wpipe);
1112 return (error);
1113 }
1114
1115 pipeunlock(wpipe);
1116
1117 orig_resid = uio->uio_resid;
1118
1119 while (uio->uio_resid) {
1120 int space;
1121
1122 pipelock(wpipe, 0);
1123 if (wpipe->pipe_state & PIPE_EOF) {
1124 pipeunlock(wpipe);
1125 error = EPIPE;
1126 break;
1127 }
1128 #ifndef PIPE_NODIRECT
1129 /*
1130 * If the transfer is large, we can gain performance if
1131 * we do process-to-process copies directly.
1132 * If the write is non-blocking, we don't use the
1133 * direct write mechanism.
1134 *
1135 * The direct write mechanism will detect the reader going
1136 * away on us.
1137 */
1138 if (uio->uio_segflg == UIO_USERSPACE &&
1139 uio->uio_iov->iov_len >= PIPE_MINDIRECT &&
1140 wpipe->pipe_buffer.size >= PIPE_MINDIRECT &&
1141 (fp->f_flag & FNONBLOCK) == 0) {
1142 pipeunlock(wpipe);
1143 error = pipe_direct_write(wpipe, uio);
1144 if (error)
1145 break;
1146 continue;
1147 }
1148 #endif
1149
1150 /*
1151 * Pipe buffered writes cannot be coincident with
1152 * direct writes. We wait until the currently executing
1153 * direct write is completed before we start filling the
1154 * pipe buffer. We break out if a signal occurs or the
1155 * reader goes away.
1156 */
1157 if (wpipe->pipe_state & PIPE_DIRECTW) {
1158 if (wpipe->pipe_state & PIPE_WANTR) {
1159 wpipe->pipe_state &= ~PIPE_WANTR;
1160 wakeup(wpipe);
1161 }
1162 pipeselwakeup(wpipe);
1163 wpipe->pipe_state |= PIPE_WANTW;
1164 pipeunlock(wpipe);
1165 error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
1166 "pipbww", 0);
1167 if (error)
1168 break;
1169 else
1170 continue;
1171 }
1172
1173 space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1174
1175 /* Writes of size <= PIPE_BUF must be atomic. */
1176 if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
1177 space = 0;
1178
1179 if (space > 0) {
1180 int size; /* Transfer size */
1181 int segsize; /* first segment to transfer */
1182
1183 /*
1184 * Transfer size is minimum of uio transfer
1185 * and free space in pipe buffer.
1186 */
1187 if (space > uio->uio_resid)
1188 size = uio->uio_resid;
1189 else
1190 size = space;
1191 /*
1192 * First segment to transfer is minimum of
1193 * transfer size and contiguous space in
1194 * pipe buffer. If first segment to transfer
1195 * is less than the transfer size, we've got
1196 * a wraparound in the buffer.
1197 */
1198 segsize = wpipe->pipe_buffer.size -
1199 wpipe->pipe_buffer.in;
1200 if (segsize > size)
1201 segsize = size;
1202
1203 /* Transfer first segment */
1204
1205 PIPE_UNLOCK(rpipe);
1206 error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
1207 segsize, uio);
1208 PIPE_LOCK(rpipe);
1209
1210 if (error == 0 && segsize < size) {
1211 KASSERT(wpipe->pipe_buffer.in + segsize ==
1212 wpipe->pipe_buffer.size,
1213 ("Pipe buffer wraparound disappeared"));
1214 /*
1215 * Transfer remaining part now, to
1216 * support atomic writes. Wraparound
1217 * happened.
1218 */
1219
1220 PIPE_UNLOCK(rpipe);
1221 error = uiomove(
1222 &wpipe->pipe_buffer.buffer[0],
1223 size - segsize, uio);
1224 PIPE_LOCK(rpipe);
1225 }
1226 if (error == 0) {
1227 wpipe->pipe_buffer.in += size;
1228 if (wpipe->pipe_buffer.in >=
1229 wpipe->pipe_buffer.size) {
1230 KASSERT(wpipe->pipe_buffer.in ==
1231 size - segsize +
1232 wpipe->pipe_buffer.size,
1233 ("Expected wraparound bad"));
1234 wpipe->pipe_buffer.in = size - segsize;
1235 }
1236
1237 wpipe->pipe_buffer.cnt += size;
1238 KASSERT(wpipe->pipe_buffer.cnt <=
1239 wpipe->pipe_buffer.size,
1240 ("Pipe buffer overflow"));
1241 }
1242 pipeunlock(wpipe);
1243 if (error != 0)
1244 break;
1245 } else {
1246 /*
1247 * If the "read-side" has been blocked, wake it up now.
1248 */
1249 if (wpipe->pipe_state & PIPE_WANTR) {
1250 wpipe->pipe_state &= ~PIPE_WANTR;
1251 wakeup(wpipe);
1252 }
1253
1254 /*
1255 * don't block on non-blocking I/O
1256 */
1257 if (fp->f_flag & FNONBLOCK) {
1258 error = EAGAIN;
1259 pipeunlock(wpipe);
1260 break;
1261 }
1262
1263 /*
1264 * We have no more space and have something to offer,
1265 * wake up select/poll.
1266 */
1267 pipeselwakeup(wpipe);
1268
1269 wpipe->pipe_state |= PIPE_WANTW;
1270 pipeunlock(wpipe);
1271 error = msleep(wpipe, PIPE_MTX(rpipe),
1272 PRIBIO | PCATCH, "pipewr", 0);
1273 if (error != 0)
1274 break;
1275 }
1276 }
1277
1278 pipelock(wpipe, 0);
1279 --wpipe->pipe_busy;
1280
1281 if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
1282 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
1283 wakeup(wpipe);
1284 } else if (wpipe->pipe_buffer.cnt > 0) {
1285 /*
1286 * If we have put any characters in the buffer, we wake up
1287 * the reader.
1288 */
1289 if (wpipe->pipe_state & PIPE_WANTR) {
1290 wpipe->pipe_state &= ~PIPE_WANTR;
1291 wakeup(wpipe);
1292 }
1293 }
1294
1295 /*
1296 * Don't return EPIPE if any byte was written.
1297 * EINTR and other interrupts are handled by generic I/O layer.
1298 * Do not pretend that I/O succeeded for obvious user error
1299 * like EFAULT.
1300 */
1301 if (uio->uio_resid != orig_resid && error == EPIPE)
1302 error = 0;
1303
1304 if (error == 0)
1305 vfs_timestamp(&wpipe->pipe_mtime);
1306
1307 /*
1308 * We have something to offer,
1309 * wake up select/poll.
1310 */
1311 if (wpipe->pipe_buffer.cnt)
1312 pipeselwakeup(wpipe);
1313
1314 pipeunlock(wpipe);
1315 PIPE_UNLOCK(rpipe);
1316 return (error);
1317 }
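/*
 * Illustrative example (editor's sketch, not part of this file): relying
 * on the atomicity pipe_write() enforces above for writes of size
 * <= PIPE_BUF.  Records no larger than PIPE_BUF are written in full or
 * not at all, so concurrent writers never interleave within a record.
 */
#if 0
#include <limits.h>
#include <unistd.h>

static ssize_t
write_record(int wfd, const char *rec, size_t len)
{
	/* A single write() of <= PIPE_BUF bytes is all-or-nothing. */
	if (len > PIPE_BUF)
		return (-1);
	return (write(wfd, rec, len));
}
#endif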
1318
1319 /* ARGSUSED */
1320 static int
1321 pipe_truncate(fp, length, active_cred, td)
1322 struct file *fp;
1323 off_t length;
1324 struct ucred *active_cred;
1325 struct thread *td;
1326 {
1327
1328 /* For named pipes call the vnode operation. */
1329 if (fp->f_vnode != NULL)
1330 return (vnops.fo_truncate(fp, length, active_cred, td));
1331 return (EINVAL);
1332 }
1333
1334 /*
1335 * we implement a very minimal set of ioctls for compatibility with sockets.
1336 */
1337 static int
1338 pipe_ioctl(fp, cmd, data, active_cred, td)
1339 struct file *fp;
1340 u_long cmd;
1341 void *data;
1342 struct ucred *active_cred;
1343 struct thread *td;
1344 {
1345 struct pipe *mpipe = fp->f_data;
1346 int error;
1347
1348 PIPE_LOCK(mpipe);
1349
1350 #ifdef MAC
1351 error = mac_pipe_check_ioctl(active_cred, mpipe->pipe_pair, cmd, data);
1352 if (error) {
1353 PIPE_UNLOCK(mpipe);
1354 return (error);
1355 }
1356 #endif
1357
1358 error = 0;
1359 switch (cmd) {
1360
1361 case FIONBIO:
1362 break;
1363
1364 case FIOASYNC:
1365 if (*(int *)data) {
1366 mpipe->pipe_state |= PIPE_ASYNC;
1367 } else {
1368 mpipe->pipe_state &= ~PIPE_ASYNC;
1369 }
1370 break;
1371
1372 case FIONREAD:
1373 if (!(fp->f_flag & FREAD)) {
1374 *(int *)data = 0;
1375 PIPE_UNLOCK(mpipe);
1376 return (0);
1377 }
1378 if (mpipe->pipe_state & PIPE_DIRECTW)
1379 *(int *)data = mpipe->pipe_map.cnt;
1380 else
1381 *(int *)data = mpipe->pipe_buffer.cnt;
1382 break;
1383
1384 case FIOSETOWN:
1385 PIPE_UNLOCK(mpipe);
1386 error = fsetown(*(int *)data, &mpipe->pipe_sigio);
1387 goto out_unlocked;
1388
1389 case FIOGETOWN:
1390 *(int *)data = fgetown(&mpipe->pipe_sigio);
1391 break;
1392
1393 /* This is deprecated, FIOSETOWN should be used instead. */
1394 case TIOCSPGRP:
1395 PIPE_UNLOCK(mpipe);
1396 error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
1397 goto out_unlocked;
1398
1399 /* This is deprecated, FIOGETOWN should be used instead. */
1400 case TIOCGPGRP:
1401 *(int *)data = -fgetown(&mpipe->pipe_sigio);
1402 break;
1403
1404 default:
1405 error = ENOTTY;
1406 break;
1407 }
1408 PIPE_UNLOCK(mpipe);
1409 out_unlocked:
1410 return (error);
1411 }
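/*
 * Illustrative example (editor's sketch, not part of this file): querying
 * the byte count computed by the FIONREAD case above.
 */
#if 0
#include <sys/ioctl.h>
#include <sys/filio.h>

static int
bytes_readable(int rfd)
{
	int n;

	/* Reports pipe_buffer.cnt (or pipe_map.cnt during a direct write). */
	if (ioctl(rfd, FIONREAD, &n) == -1)
		return (-1);
	return (n);
}
#endif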
1412
1413 static int
1414 pipe_poll(fp, events, active_cred, td)
1415 struct file *fp;
1416 int events;
1417 struct ucred *active_cred;
1418 struct thread *td;
1419 {
1420 struct pipe *rpipe;
1421 struct pipe *wpipe;
1422 int levents, revents;
1423 #ifdef MAC
1424 int error;
1425 #endif
1426
1427 revents = 0;
1428 rpipe = fp->f_data;
1429 wpipe = PIPE_PEER(rpipe);
1430 PIPE_LOCK(rpipe);
1431 #ifdef MAC
1432 error = mac_pipe_check_poll(active_cred, rpipe->pipe_pair);
1433 if (error)
1434 goto locked_error;
1435 #endif
1436 if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM))
1437 if ((rpipe->pipe_state & PIPE_DIRECTW) ||
1438 (rpipe->pipe_buffer.cnt > 0))
1439 revents |= events & (POLLIN | POLLRDNORM);
1440
1441 if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM))
1442 if (wpipe->pipe_present != PIPE_ACTIVE ||
1443 (wpipe->pipe_state & PIPE_EOF) ||
1444 (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
1445 ((wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF ||
1446 wpipe->pipe_buffer.size == 0)))
1447 revents |= events & (POLLOUT | POLLWRNORM);
1448
1449 levents = events &
1450 (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM | POLLRDBAND);
1451 if (rpipe->pipe_state & PIPE_NAMED && fp->f_flag & FREAD && levents &&
1452 fp->f_seqcount == rpipe->pipe_wgen)
1453 events |= POLLINIGNEOF;
1454
1455 if ((events & POLLINIGNEOF) == 0) {
1456 if (rpipe->pipe_state & PIPE_EOF) {
1457 revents |= (events & (POLLIN | POLLRDNORM));
1458 if (wpipe->pipe_present != PIPE_ACTIVE ||
1459 (wpipe->pipe_state & PIPE_EOF))
1460 revents |= POLLHUP;
1461 }
1462 }
1463
1464 if (revents == 0) {
1465 if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM)) {
1466 selrecord(td, &rpipe->pipe_sel);
1467 if (SEL_WAITING(&rpipe->pipe_sel))
1468 rpipe->pipe_state |= PIPE_SEL;
1469 }
1470
1471 if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM)) {
1472 selrecord(td, &wpipe->pipe_sel);
1473 if (SEL_WAITING(&wpipe->pipe_sel))
1474 wpipe->pipe_state |= PIPE_SEL;
1475 }
1476 }
1477 #ifdef MAC
1478 locked_error:
1479 #endif
1480 PIPE_UNLOCK(rpipe);
1481
1482 return (revents);
1483 }
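/*
 * Illustrative example (editor's sketch, not part of this file): per the
 * conditions in pipe_poll() above, the write side polls ready once at
 * least PIPE_BUF bytes of buffer space are free, and POLLHUP is reported
 * when the other end is gone.
 */
#if 0
#include <poll.h>

static int
wait_writable(int wfd, int timeout_ms)
{
	struct pollfd pfd = { .fd = wfd, .events = POLLOUT };

	return (poll(&pfd, 1, timeout_ms));
}
#endif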
1484
1485 /*
1486 * We shouldn't need locks here as we're doing a read and this should
1487 * be a natural race.
1488 */
1489 static int
1490 pipe_stat(fp, ub, active_cred, td)
1491 struct file *fp;
1492 struct stat *ub;
1493 struct ucred *active_cred;
1494 struct thread *td;
1495 {
1496 struct pipe *pipe;
1497 int new_unr;
1498 #ifdef MAC
1499 int error;
1500 #endif
1501
1502 pipe = fp->f_data;
1503 PIPE_LOCK(pipe);
1504 #ifdef MAC
1505 error = mac_pipe_check_stat(active_cred, pipe->pipe_pair);
1506 if (error) {
1507 PIPE_UNLOCK(pipe);
1508 return (error);
1509 }
1510 #endif
1511
1512 /* For named pipes ask the underlying filesystem. */
1513 if (pipe->pipe_state & PIPE_NAMED) {
1514 PIPE_UNLOCK(pipe);
1515 return (vnops.fo_stat(fp, ub, active_cred, td));
1516 }
1517
1518 /*
1519 * Lazily allocate an inode number for the pipe. Most pipe
1520 * users do not call fstat(2) on the pipe, which means that
1521 * postponing the inode allocation until it must be
1522 * returned to userland is useful. If alloc_unr failed,
1523 * assign st_ino zero instead of returning an error.
1524 * Special pipe_ino values:
1525 * -1 - not yet initialized;
1526 * 0 - alloc_unr failed, return 0 as st_ino forever.
1527 */
1528 if (pipe->pipe_ino == (ino_t)-1) {
1529 new_unr = alloc_unr(pipeino_unr);
1530 if (new_unr != -1)
1531 pipe->pipe_ino = new_unr;
1532 else
1533 pipe->pipe_ino = 0;
1534 }
1535 PIPE_UNLOCK(pipe);
1536
1537 bzero(ub, sizeof(*ub));
1538 ub->st_mode = S_IFIFO;
1539 ub->st_blksize = PAGE_SIZE;
1540 if (pipe->pipe_state & PIPE_DIRECTW)
1541 ub->st_size = pipe->pipe_map.cnt;
1542 else
1543 ub->st_size = pipe->pipe_buffer.cnt;
1544 ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
1545 ub->st_atim = pipe->pipe_atime;
1546 ub->st_mtim = pipe->pipe_mtime;
1547 ub->st_ctim = pipe->pipe_ctime;
1548 ub->st_uid = fp->f_cred->cr_uid;
1549 ub->st_gid = fp->f_cred->cr_gid;
1550 ub->st_dev = pipedev_ino;
1551 ub->st_ino = pipe->pipe_ino;
1552 /*
1553 * Left as 0: st_nlink, st_rdev, st_flags, st_gen.
1554 */
1555 return (0);
1556 }
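/*
 * Illustrative example (editor's sketch, not part of this file): an
 * fstat(2) call exercises the lazy inode allocation above; st_size
 * reports the bytes currently buffered in the pipe.
 */
#if 0
#include <sys/stat.h>

static int
pipe_bytes_buffered(int fd, off_t *sizep)
{
	struct stat sb;

	if (fstat(fd, &sb) == -1)
		return (-1);
	/* sb.st_mode is S_IFIFO; st_ino was assigned on first fstat(). */
	*sizep = sb.st_size;
	return (0);
}
#endif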
1557
1558 /* ARGSUSED */
1559 static int
1560 pipe_close(fp, td)
1561 struct file *fp;
1562 struct thread *td;
1563 {
1564
1565 if (fp->f_vnode != NULL)
1566 return vnops.fo_close(fp, td);
1567 fp->f_ops = &badfileops;
1568 pipe_dtor(fp->f_data);
1569 fp->f_data = NULL;
1570 return (0);
1571 }
1572
1573 static int
1574 pipe_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, struct thread *td)
1575 {
1576 struct pipe *cpipe;
1577 int error;
1578
1579 cpipe = fp->f_data;
1580 if (cpipe->pipe_state & PIPE_NAMED)
1581 error = vn_chmod(fp, mode, active_cred, td);
1582 else
1583 error = invfo_chmod(fp, mode, active_cred, td);
1584 return (error);
1585 }
1586
1587 static int
1588 pipe_chown(fp, uid, gid, active_cred, td)
1589 struct file *fp;
1590 uid_t uid;
1591 gid_t gid;
1592 struct ucred *active_cred;
1593 struct thread *td;
1594 {
1595 struct pipe *cpipe;
1596 int error;
1597
1598 cpipe = fp->f_data;
1599 if (cpipe->pipe_state & PIPE_NAMED)
1600 error = vn_chown(fp, uid, gid, active_cred, td);
1601 else
1602 error = invfo_chown(fp, uid, gid, active_cred, td);
1603 return (error);
1604 }
1605
1606 static void
1607 pipe_free_kmem(cpipe)
1608 struct pipe *cpipe;
1609 {
1610
1611 KASSERT(!mtx_owned(PIPE_MTX(cpipe)),
1612 ("pipe_free_kmem: pipe mutex locked"));
1613
1614 if (cpipe->pipe_buffer.buffer != NULL) {
1615 atomic_subtract_long(&amountpipekva, cpipe->pipe_buffer.size);
1616 vm_map_remove(pipe_map,
1617 (vm_offset_t)cpipe->pipe_buffer.buffer,
1618 (vm_offset_t)cpipe->pipe_buffer.buffer + cpipe->pipe_buffer.size);
1619 cpipe->pipe_buffer.buffer = NULL;
1620 }
1621 #ifndef PIPE_NODIRECT
1622 {
1623 cpipe->pipe_map.cnt = 0;
1624 cpipe->pipe_map.pos = 0;
1625 cpipe->pipe_map.npages = 0;
1626 }
1627 #endif
1628 }
1629
1630 /*
1631 * shutdown the pipe
1632 */
1633 static void
1634 pipeclose(cpipe)
1635 struct pipe *cpipe;
1636 {
1637 struct pipepair *pp;
1638 struct pipe *ppipe;
1639
1640 KASSERT(cpipe != NULL, ("pipeclose: cpipe == NULL"));
1641
1642 PIPE_LOCK(cpipe);
1643 pipelock(cpipe, 0);
1644 pp = cpipe->pipe_pair;
1645
1646 pipeselwakeup(cpipe);
1647
1648 /*
1649 * If the other side is blocked, wake it up saying that
1650 * we want to close it down.
1651 */
1652 cpipe->pipe_state |= PIPE_EOF;
1653 while (cpipe->pipe_busy) {
1654 wakeup(cpipe);
1655 cpipe->pipe_state |= PIPE_WANT;
1656 pipeunlock(cpipe);
1657 msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
1658 pipelock(cpipe, 0);
1659 }
1660
1661
1662 /*
1663 * Disconnect from peer, if any.
1664 */
1665 ppipe = cpipe->pipe_peer;
1666 if (ppipe->pipe_present == PIPE_ACTIVE) {
1667 pipeselwakeup(ppipe);
1668
1669 ppipe->pipe_state |= PIPE_EOF;
1670 wakeup(ppipe);
1671 KNOTE_LOCKED(&ppipe->pipe_sel.si_note, 0);
1672 }
1673
1674 /*
1675 * Mark this endpoint as free. Release kmem resources. We
1676 * don't mark this endpoint as unused until we've finished
1677 * doing that, or the pipe might disappear out from under
1678 * us.
1679 */
1680 PIPE_UNLOCK(cpipe);
1681 pipe_free_kmem(cpipe);
1682 PIPE_LOCK(cpipe);
1683 cpipe->pipe_present = PIPE_CLOSING;
1684 pipeunlock(cpipe);
1685
1686 /*
1687 * knlist_clear() may sleep, dropping the PIPE_MTX. Set
1688 * PIPE_FINALIZED, which allows the other end to free the
1689 * pipe_pair, only after the knotes are completely dismantled.
1690 */
1691 knlist_clear(&cpipe->pipe_sel.si_note, 1);
1692 cpipe->pipe_present = PIPE_FINALIZED;
1693 seldrain(&cpipe->pipe_sel);
1694 knlist_destroy(&cpipe->pipe_sel.si_note);
1695
1696 /*
1697 * If both endpoints are now closed, release the memory for the
1698 * pipe pair. If not, unlock.
1699 */
1700 if (ppipe->pipe_present == PIPE_FINALIZED) {
1701 PIPE_UNLOCK(cpipe);
1702 #ifdef MAC
1703 mac_pipe_destroy(pp);
1704 #endif
1705 uma_zfree(pipe_zone, cpipe->pipe_pair);
1706 } else
1707 PIPE_UNLOCK(cpipe);
1708 }
1709
1710 /*ARGSUSED*/
1711 static int
1712 pipe_kqfilter(struct file *fp, struct knote *kn)
1713 {
1714 struct pipe *cpipe;
1715
1716 /*
1717 * If a filter is requested that is not supported by this file
1718 * descriptor, don't return an error, but also don't ever generate an
1719 * event.
1720 */
1721 if ((kn->kn_filter == EVFILT_READ) && !(fp->f_flag & FREAD)) {
1722 kn->kn_fop = &pipe_nfiltops;
1723 return (0);
1724 }
1725 if ((kn->kn_filter == EVFILT_WRITE) && !(fp->f_flag & FWRITE)) {
1726 kn->kn_fop = &pipe_nfiltops;
1727 return (0);
1728 }
1729 cpipe = fp->f_data;
1730 PIPE_LOCK(cpipe);
1731 switch (kn->kn_filter) {
1732 case EVFILT_READ:
1733 kn->kn_fop = &pipe_rfiltops;
1734 break;
1735 case EVFILT_WRITE:
1736 kn->kn_fop = &pipe_wfiltops;
1737 if (cpipe->pipe_peer->pipe_present != PIPE_ACTIVE) {
1738 /* other end of pipe has been closed */
1739 PIPE_UNLOCK(cpipe);
1740 return (EPIPE);
1741 }
1742 cpipe = PIPE_PEER(cpipe);
1743 break;
1744 default:
1745 PIPE_UNLOCK(cpipe);
1746 return (EINVAL);
1747 }
1748
1749 kn->kn_hook = cpipe;
1750 knlist_add(&cpipe->pipe_sel.si_note, kn, 1);
1751 PIPE_UNLOCK(cpipe);
1752 return (0);
1753 }
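/*
 * Illustrative example (editor's sketch, not part of this file):
 * registering the EVFILT_READ filter implemented above via kqueue(2).
 * filt_piperead() sets kn_data to the bytes available, which kevent(2)
 * returns in the event's 'data' field.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

static int
watch_pipe_read(int kq, int rfd)
{
	struct kevent kev;

	EV_SET(&kev, rfd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	return (kevent(kq, &kev, 1, NULL, 0, NULL));
}
#endif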
1754
1755 static void
1756 filt_pipedetach(struct knote *kn)
1757 {
1758 struct pipe *cpipe = kn->kn_hook;
1759
1760 PIPE_LOCK(cpipe);
1761 knlist_remove(&cpipe->pipe_sel.si_note, kn, 1);
1762 PIPE_UNLOCK(cpipe);
1763 }
1764
1765 /*ARGSUSED*/
1766 static int
1767 filt_piperead(struct knote *kn, long hint)
1768 {
1769 struct pipe *rpipe = kn->kn_hook;
1770 struct pipe *wpipe = rpipe->pipe_peer;
1771 int ret;
1772
1773 PIPE_LOCK_ASSERT(rpipe, MA_OWNED);
1774 kn->kn_data = rpipe->pipe_buffer.cnt;
1775 if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
1776 kn->kn_data = rpipe->pipe_map.cnt;
1777
1778 if ((rpipe->pipe_state & PIPE_EOF) ||
1779 wpipe->pipe_present != PIPE_ACTIVE ||
1780 (wpipe->pipe_state & PIPE_EOF)) {
1781 kn->kn_flags |= EV_EOF;
1782 return (1);
1783 }
1784 ret = kn->kn_data > 0;
1785 return ret;
1786 }
1787
1788 /*ARGSUSED*/
1789 static int
1790 filt_pipewrite(struct knote *kn, long hint)
1791 {
1792 struct pipe *wpipe;
1793
1794 wpipe = kn->kn_hook;
1795 PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
1796 if (wpipe->pipe_present != PIPE_ACTIVE ||
1797 (wpipe->pipe_state & PIPE_EOF)) {
1798 kn->kn_data = 0;
1799 kn->kn_flags |= EV_EOF;
1800 return (1);
1801 }
1802 kn->kn_data = (wpipe->pipe_buffer.size > 0) ?
1803 (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) : PIPE_BUF;
1804 if (wpipe->pipe_state & PIPE_DIRECTW)
1805 kn->kn_data = 0;
1806
1807 return (kn->kn_data >= PIPE_BUF);
1808 }
1809
1810 static void
1811 filt_pipedetach_notsup(struct knote *kn)
1812 {
1813
1814 }
1815
1816 static int
1817 filt_pipenotsup(struct knote *kn, long hint)
1818 {
1819
1820 return (0);
1821 }