FreeBSD/Linux Kernel Cross Reference
sys/kern/sys_pipe.c
1 /*-
2 * Copyright (c) 1996 John S. Dyson
3 * Copyright (c) 2012 Giovanni Trematerra
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice immediately at the beginning of the file, without modification,
11 * this list of conditions, and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Absolutely no warranty of function or purpose is made by the author
16 * John S. Dyson.
17 * 4. Modifications may be freely made to this file if the above conditions
18 * are met.
19 */
20
21 /*
22 * This file contains a high-performance replacement for the socket-based
23 * pipes scheme originally used in FreeBSD/4.4Lite. It does not support
24 * all features of sockets, but does do everything that pipes normally
25 * do.
26 */
27
28 /*
29 * This code has two modes of operation, a small write mode and a large
30 * write mode. The small write mode acts like conventional pipes with
31 * a kernel buffer. If the write is smaller than PIPE_MINDIRECT, the
32 * "normal" pipe buffering is done. If the write is between PIPE_MINDIRECT
33 * and PIPE_SIZE in size, the sending process pins the underlying pages in
34 * memory, and the receiving process copies directly from these pinned pages
35 * in the sending process.
36 *
37 * If the sending process receives a signal, it is possible that it will
38 * go away, and certainly its address space can change, because control
39 * is returned to the user-mode side. In that case, the pipe code
40 * arranges to copy the buffer supplied by the user process, to a pageable
41 * kernel buffer, and the receiving process will grab the data from the
42 * pageable kernel buffer. Since signals don't happen all that often,
43 * the copy operation is normally eliminated.
44 *
45 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
46 * happen for small transfers so that the system will not spend all of
47 * its time context switching.
48 *
49 * In order to limit the resource use of pipes, two sysctls exist:
50 *
51 * kern.ipc.maxpipekva - This is a hard limit on the amount of pageable
52 * address space available to us in pipe_map. This value is normally
53 * autotuned, but may also be loader tuned.
54 *
55 * kern.ipc.pipekva - This read-only sysctl tracks the current amount of
56 * memory in use by pipes.
57 *
58 * Based on how large pipekva is relative to maxpipekva, the following
59 * will happen:
60 *
61 * 0% - 50%:
62 * New pipes are given 16K of memory backing, pipes may dynamically
63 * grow to as large as 64K where needed.
64 * 50% - 75%:
65 * New pipes are given 4K (or PAGE_SIZE) of memory backing,
66 * existing pipes may NOT grow.
67 * 75% - 100%:
68 * New pipes are given 4K (or PAGE_SIZE) of memory backing,
69 * existing pipes will be shrunk down to 4K whenever possible.
70 *
71 * Resizing may be disabled by setting kern.ipc.piperesizeallowed=0. If
72 * that is set, the only resize that will occur is the 0 -> SMALL_PIPE_SIZE
73 * resize which MUST occur for reverse-direction pipes when they are
74 * first used.
75 *
76 * Additional information about the current state of pipes may be obtained
77 * from kern.ipc.pipes, kern.ipc.pipefragretry, kern.ipc.pipeallocfail,
78 * and kern.ipc.piperesizefail.
79 *
80 * Locking rules: There are two locks present here: A mutex, used via
81 * PIPE_LOCK, and a flag, used via pipelock(). All locking is done via
82 * the flag, as mutexes cannot persist over uiomove. The mutex
83 * exists only to guard access to the flag, and is not in itself a
84 * locking mechanism. Also note that there is only a single mutex for
85 * both directions of a pipe.
86 *
87 * As pipelock() may have to sleep before it can acquire the flag, it
88 * is important to reread all data after a call to pipelock(); everything
89 * in the structure may have changed.
90 */
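/*
 * Illustrative userland sketch (not from this file; a hedged example):
 * exercising both write modes described above.  The child drains the
 * pipe so the large write can complete; whether that write actually
 * takes the direct page-pinning path depends on PIPE_MINDIRECT and on
 * kern.ipc.maxpipekva pressure at run time.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/wait.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		static char buf[64 * 1024];
 *		long kva;
 *		size_t len = sizeof(kva);
 *		int fds[2];
 *		pid_t pid;
 *
 *		if (sysctlbyname("kern.ipc.pipekva", &kva, &len, NULL, 0) == 0)
 *			printf("pipe KVA in use: %ld\n", kva);
 *		if (pipe(fds) == -1)
 *			err(1, "pipe");
 *		if ((pid = fork()) == -1)
 *			err(1, "fork");
 *		if (pid == 0) {
 *			ssize_t n;
 *
 *			close(fds[1]);
 *			while ((n = read(fds[0], buf, sizeof(buf))) > 0)
 *				;
 *			_exit(n < 0);
 *		}
 *		close(fds[0]);
 *		memset(buf, 'x', sizeof(buf));
 *		write(fds[1], buf, 128);		small: kernel-buffer copy
 *		write(fds[1], buf, sizeof(buf));	large: direct-write eligible
 *		close(fds[1]);
 *		waitpid(pid, NULL, 0);
 *		return (0);
 *	}
 */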
91
92 #include <sys/cdefs.h>
93 __FBSDID("$FreeBSD: releng/10.2/sys/kern/sys_pipe.c 278310 2015-02-06 09:02:10Z kib $");
94
95 #include <sys/param.h>
96 #include <sys/systm.h>
97 #include <sys/conf.h>
98 #include <sys/fcntl.h>
99 #include <sys/file.h>
100 #include <sys/filedesc.h>
101 #include <sys/filio.h>
102 #include <sys/kernel.h>
103 #include <sys/lock.h>
104 #include <sys/mutex.h>
105 #include <sys/ttycom.h>
106 #include <sys/stat.h>
107 #include <sys/malloc.h>
108 #include <sys/poll.h>
109 #include <sys/selinfo.h>
110 #include <sys/signalvar.h>
111 #include <sys/syscallsubr.h>
112 #include <sys/sysctl.h>
113 #include <sys/sysproto.h>
114 #include <sys/pipe.h>
115 #include <sys/proc.h>
116 #include <sys/vnode.h>
117 #include <sys/uio.h>
118 #include <sys/event.h>
119
120 #include <security/mac/mac_framework.h>
121
122 #include <vm/vm.h>
123 #include <vm/vm_param.h>
124 #include <vm/vm_object.h>
125 #include <vm/vm_kern.h>
126 #include <vm/vm_extern.h>
127 #include <vm/pmap.h>
128 #include <vm/vm_map.h>
129 #include <vm/vm_page.h>
130 #include <vm/uma.h>
131
132 /*
133 * Use this define if you want to disable *fancy* VM things. Expect an
134 * approx 30% decrease in transfer rate. This could be useful for
135 * NetBSD or OpenBSD.
136 */
137 /* #define PIPE_NODIRECT */
138
139 #define PIPE_PEER(pipe) \
140 (((pipe)->pipe_state & PIPE_NAMED) ? (pipe) : ((pipe)->pipe_peer))
141
142 /*
143 * interfaces to the outside world
144 */
145 static fo_rdwr_t pipe_read;
146 static fo_rdwr_t pipe_write;
147 static fo_truncate_t pipe_truncate;
148 static fo_ioctl_t pipe_ioctl;
149 static fo_poll_t pipe_poll;
150 static fo_kqfilter_t pipe_kqfilter;
151 static fo_stat_t pipe_stat;
152 static fo_close_t pipe_close;
153 static fo_chmod_t pipe_chmod;
154 static fo_chown_t pipe_chown;
155
156 struct fileops pipeops = {
157 .fo_read = pipe_read,
158 .fo_write = pipe_write,
159 .fo_truncate = pipe_truncate,
160 .fo_ioctl = pipe_ioctl,
161 .fo_poll = pipe_poll,
162 .fo_kqfilter = pipe_kqfilter,
163 .fo_stat = pipe_stat,
164 .fo_close = pipe_close,
165 .fo_chmod = pipe_chmod,
166 .fo_chown = pipe_chown,
167 .fo_sendfile = invfo_sendfile,
168 .fo_flags = DFLAG_PASSABLE
169 };
170
171 static void filt_pipedetach(struct knote *kn);
172 static void filt_pipedetach_notsup(struct knote *kn);
173 static int filt_pipenotsup(struct knote *kn, long hint);
174 static int filt_piperead(struct knote *kn, long hint);
175 static int filt_pipewrite(struct knote *kn, long hint);
176
177 static struct filterops pipe_nfiltops = {
178 .f_isfd = 1,
179 .f_detach = filt_pipedetach_notsup,
180 .f_event = filt_pipenotsup
181 };
182 static struct filterops pipe_rfiltops = {
183 .f_isfd = 1,
184 .f_detach = filt_pipedetach,
185 .f_event = filt_piperead
186 };
187 static struct filterops pipe_wfiltops = {
188 .f_isfd = 1,
189 .f_detach = filt_pipedetach,
190 .f_event = filt_pipewrite
191 };
192
193 /*
194 * Default pipe buffer size(s); these can be fairly large now because pipe
195 * space is pageable. The pipe code will try to maintain locality of
196 * reference for performance reasons, so small amounts of outstanding I/O
197 * will not wipe the cache.
198 */
199 #define MINPIPESIZE (PIPE_SIZE/3)
200 #define MAXPIPESIZE (2*PIPE_SIZE/3)
201
202 static long amountpipekva;
203 static int pipefragretry;
204 static int pipeallocfail;
205 static int piperesizefail;
206 static int piperesizeallowed = 1;
207
208 SYSCTL_LONG(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RDTUN,
209 &maxpipekva, 0, "Pipe KVA limit");
210 SYSCTL_LONG(_kern_ipc, OID_AUTO, pipekva, CTLFLAG_RD,
211 &amountpipekva, 0, "Pipe KVA usage");
212 SYSCTL_INT(_kern_ipc, OID_AUTO, pipefragretry, CTLFLAG_RD,
213 &pipefragretry, 0, "Pipe allocation retries due to fragmentation");
214 SYSCTL_INT(_kern_ipc, OID_AUTO, pipeallocfail, CTLFLAG_RD,
215 &pipeallocfail, 0, "Pipe allocation failures");
216 SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizefail, CTLFLAG_RD,
217 &piperesizefail, 0, "Pipe resize failures");
218 SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizeallowed, CTLFLAG_RW,
219 &piperesizeallowed, 0, "Pipe resizing allowed");
220
221 static void pipeinit(void *dummy __unused);
222 static void pipeclose(struct pipe *cpipe);
223 static void pipe_free_kmem(struct pipe *cpipe);
224 static void pipe_create(struct pipe *pipe, int backing);
225 static void pipe_paircreate(struct thread *td, struct pipepair **p_pp);
226 static __inline int pipelock(struct pipe *cpipe, int catch);
227 static __inline void pipeunlock(struct pipe *cpipe);
228 #ifndef PIPE_NODIRECT
229 static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
230 static void pipe_destroy_write_buffer(struct pipe *wpipe);
231 static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
232 static void pipe_clone_write_buffer(struct pipe *wpipe);
233 #endif
234 static int pipespace(struct pipe *cpipe, int size);
235 static int pipespace_new(struct pipe *cpipe, int size);
236
237 static int pipe_zone_ctor(void *mem, int size, void *arg, int flags);
238 static int pipe_zone_init(void *mem, int size, int flags);
239 static void pipe_zone_fini(void *mem, int size);
240
241 static uma_zone_t pipe_zone;
242 static struct unrhdr *pipeino_unr;
243 static dev_t pipedev_ino;
244
245 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);
246
247 static void
248 pipeinit(void *dummy __unused)
249 {
250
251 pipe_zone = uma_zcreate("pipe", sizeof(struct pipepair),
252 pipe_zone_ctor, NULL, pipe_zone_init, pipe_zone_fini,
253 UMA_ALIGN_PTR, 0);
254 KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
255 pipeino_unr = new_unrhdr(1, INT32_MAX, NULL);
256 KASSERT(pipeino_unr != NULL, ("pipe fake inodes not initialized"));
257 pipedev_ino = devfs_alloc_cdp_inode();
258 KASSERT(pipedev_ino > 0, ("pipe dev inode not initialized"));
259 }
260
261 static int
262 pipe_zone_ctor(void *mem, int size, void *arg, int flags)
263 {
264 struct pipepair *pp;
265 struct pipe *rpipe, *wpipe;
266
267 KASSERT(size == sizeof(*pp), ("pipe_zone_ctor: wrong size"));
268
269 pp = (struct pipepair *)mem;
270
271 /*
272 * We zero both pipe endpoints to make sure all the kmem pointers
273 * are NULL, flag fields are zero'd, etc. We timestamp both
274 * endpoints with the same time.
275 */
276 rpipe = &pp->pp_rpipe;
277 bzero(rpipe, sizeof(*rpipe));
278 vfs_timestamp(&rpipe->pipe_ctime);
279 rpipe->pipe_atime = rpipe->pipe_mtime = rpipe->pipe_ctime;
280
281 wpipe = &pp->pp_wpipe;
282 bzero(wpipe, sizeof(*wpipe));
283 wpipe->pipe_ctime = rpipe->pipe_ctime;
284 wpipe->pipe_atime = wpipe->pipe_mtime = rpipe->pipe_ctime;
285
286 rpipe->pipe_peer = wpipe;
287 rpipe->pipe_pair = pp;
288 wpipe->pipe_peer = rpipe;
289 wpipe->pipe_pair = pp;
290
291 /*
292 * Mark both endpoints as present; they will later get free'd
293 * one at a time. When both are free'd, then the whole pair
294 * is released.
295 */
296 rpipe->pipe_present = PIPE_ACTIVE;
297 wpipe->pipe_present = PIPE_ACTIVE;
298
299 /*
300 * Eventually, the MAC Framework may initialize the label
301 * in ctor or init, but for now we do it elsewhere to avoid
302 * blocking in ctor or init.
303 */
304 pp->pp_label = NULL;
305
306 return (0);
307 }
308
309 static int
310 pipe_zone_init(void *mem, int size, int flags)
311 {
312 struct pipepair *pp;
313
314 KASSERT(size == sizeof(*pp), ("pipe_zone_init: wrong size"));
315
316 pp = (struct pipepair *)mem;
317
318 mtx_init(&pp->pp_mtx, "pipe mutex", NULL, MTX_DEF);
319 return (0);
320 }
321
322 static void
323 pipe_zone_fini(void *mem, int size)
324 {
325 struct pipepair *pp;
326
327 KASSERT(size == sizeof(*pp), ("pipe_zone_fini: wrong size"));
328
329 pp = (struct pipepair *)mem;
330
331 mtx_destroy(&pp->pp_mtx);
332 }
333
334 static void
335 pipe_paircreate(struct thread *td, struct pipepair **p_pp)
336 {
337 struct pipepair *pp;
338 struct pipe *rpipe, *wpipe;
339
340 *p_pp = pp = uma_zalloc(pipe_zone, M_WAITOK);
341 #ifdef MAC
342 /*
343 * The MAC label is shared between the connected endpoints. As a
344 * result mac_pipe_init() and mac_pipe_create() are called once
345 * for the pair, and not on the endpoints.
346 */
347 mac_pipe_init(pp);
348 mac_pipe_create(td->td_ucred, pp);
349 #endif
350 rpipe = &pp->pp_rpipe;
351 wpipe = &pp->pp_wpipe;
352
353 knlist_init_mtx(&rpipe->pipe_sel.si_note, PIPE_MTX(rpipe));
354 knlist_init_mtx(&wpipe->pipe_sel.si_note, PIPE_MTX(wpipe));
355
356 /* Only the forward direction pipe is backed by default */
357 pipe_create(rpipe, 1);
358 pipe_create(wpipe, 0);
359
360 rpipe->pipe_state |= PIPE_DIRECTOK;
361 wpipe->pipe_state |= PIPE_DIRECTOK;
362 }
363
364 void
365 pipe_named_ctor(struct pipe **ppipe, struct thread *td)
366 {
367 struct pipepair *pp;
368
369 pipe_paircreate(td, &pp);
370 pp->pp_rpipe.pipe_state |= PIPE_NAMED;
371 *ppipe = &pp->pp_rpipe;
372 }
373
374 void
375 pipe_dtor(struct pipe *dpipe)
376 {
377 struct pipe *peer;
378 ino_t ino;
379
380 ino = dpipe->pipe_ino;
381 peer = (dpipe->pipe_state & PIPE_NAMED) != 0 ? dpipe->pipe_peer : NULL;
382 funsetown(&dpipe->pipe_sigio);
383 pipeclose(dpipe);
384 if (peer != NULL) {
385 funsetown(&peer->pipe_sigio);
386 pipeclose(peer);
387 }
388 if (ino != 0 && ino != (ino_t)-1)
389 free_unr(pipeino_unr, ino);
390 }
391
392 /*
393 * The pipe system call for the DTYPE_PIPE type of pipes. If we fail, let
394 * the zone pick up the pieces via pipeclose().
395 */
396 int
397 kern_pipe(struct thread *td, int fildes[2])
398 {
399
400 return (kern_pipe2(td, fildes, 0));
401 }
402
403 int
404 kern_pipe2(struct thread *td, int fildes[2], int flags)
405 {
406 struct filedesc *fdp;
407 struct file *rf, *wf;
408 struct pipe *rpipe, *wpipe;
409 struct pipepair *pp;
410 int fd, fflags, error;
411
412 fdp = td->td_proc->p_fd;
413 pipe_paircreate(td, &pp);
414 rpipe = &pp->pp_rpipe;
415 wpipe = &pp->pp_wpipe;
416 error = falloc(td, &rf, &fd, flags);
417 if (error) {
418 pipeclose(rpipe);
419 pipeclose(wpipe);
420 return (error);
421 }
422 /* An extra reference on `rf' has been held for us by falloc(). */
423 fildes[0] = fd;
424
425 fflags = FREAD | FWRITE;
426 if ((flags & O_NONBLOCK) != 0)
427 fflags |= FNONBLOCK;
428
429 /*
430 * Warning: once we've gotten past allocation of the fd for the
431 * read-side, we can only drop the read side via fdrop() in order
432 * to avoid races against processes which manage to dup() the read
433 * side while we are blocked trying to allocate the write side.
434 */
435 finit(rf, fflags, DTYPE_PIPE, rpipe, &pipeops);
436 error = falloc(td, &wf, &fd, flags);
437 if (error) {
438 fdclose(fdp, rf, fildes[0], td);
439 fdrop(rf, td);
440 /* rpipe has been closed by fdrop(). */
441 pipeclose(wpipe);
442 return (error);
443 }
444 /* An extra reference on `wf' has been held for us by falloc(). */
445 finit(wf, fflags, DTYPE_PIPE, wpipe, &pipeops);
446 fdrop(wf, td);
447 fildes[1] = fd;
448 fdrop(rf, td);
449
450 return (0);
451 }
452
453 /* ARGSUSED */
454 int
455 sys_pipe(struct thread *td, struct pipe_args *uap)
456 {
457 int error;
458 int fildes[2];
459
460 error = kern_pipe(td, fildes);
461 if (error)
462 return (error);
463
464 td->td_retval[0] = fildes[0];
465 td->td_retval[1] = fildes[1];
466
467 return (0);
468 }
469
470 int
471 sys_pipe2(struct thread *td, struct pipe2_args *uap)
472 {
473 int error, fildes[2];
474
475 if (uap->flags & ~(O_CLOEXEC | O_NONBLOCK))
476 return (EINVAL);
477 error = kern_pipe2(td, fildes, uap->flags);
478 if (error)
479 return (error);
480 error = copyout(fildes, uap->fildes, 2 * sizeof(int));
481 if (error) {
482 (void)kern_close(td, fildes[0]);
483 (void)kern_close(td, fildes[1]);
484 }
485 return (error);
486 }
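/*
 * Illustrative userland sketch (hypothetical, not part of this file):
 * per the flag check in sys_pipe2() above, pipe2(2) accepts only
 * O_CLOEXEC and O_NONBLOCK and rejects anything else with EINVAL.
 *
 *	#include <assert.h>
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	void
 *	demo_pipe2(void)
 *	{
 *		int fds[2];
 *
 *		assert(pipe2(fds, O_CLOEXEC | O_NONBLOCK) == 0);
 *		close(fds[0]);
 *		close(fds[1]);
 *		assert(pipe2(fds, O_APPEND) == -1 && errno == EINVAL);
 *	}
 */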
487
488 /*
489 * Allocate kva for the pipe circular buffer; the space is pageable.
490 * This routine will 'realloc' the size of a pipe safely: if it fails,
491 * it will retain the old buffer and return ENOMEM.
493 */
494 static int
495 pipespace_new(cpipe, size)
496 struct pipe *cpipe;
497 int size;
498 {
499 caddr_t buffer;
500 int error, cnt, firstseg;
501 static int curfail = 0;
502 static struct timeval lastfail;
503
504 KASSERT(!mtx_owned(PIPE_MTX(cpipe)), ("pipespace: pipe mutex locked"));
505 KASSERT(!(cpipe->pipe_state & PIPE_DIRECTW),
506 ("pipespace: resize of direct writes not allowed"));
507 retry:
508 cnt = cpipe->pipe_buffer.cnt;
509 if (cnt > size)
510 size = cnt;
511
512 size = round_page(size);
513 buffer = (caddr_t) vm_map_min(pipe_map);
514
515 error = vm_map_find(pipe_map, NULL, 0,
516 (vm_offset_t *) &buffer, size, 0, VMFS_ANY_SPACE,
517 VM_PROT_ALL, VM_PROT_ALL, 0);
518 if (error != KERN_SUCCESS) {
519 if ((cpipe->pipe_buffer.buffer == NULL) &&
520 (size > SMALL_PIPE_SIZE)) {
521 size = SMALL_PIPE_SIZE;
522 pipefragretry++;
523 goto retry;
524 }
525 if (cpipe->pipe_buffer.buffer == NULL) {
526 pipeallocfail++;
527 if (ppsratecheck(&lastfail, &curfail, 1))
528 printf("kern.ipc.maxpipekva exceeded; see tuning(7)\n");
529 } else {
530 piperesizefail++;
531 }
532 return (ENOMEM);
533 }
534
535 /* copy data, then free old resources if we're resizing */
536 if (cnt > 0) {
537 if (cpipe->pipe_buffer.in <= cpipe->pipe_buffer.out) {
538 firstseg = cpipe->pipe_buffer.size - cpipe->pipe_buffer.out;
539 bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
540 buffer, firstseg);
541 if ((cnt - firstseg) > 0)
542 bcopy(cpipe->pipe_buffer.buffer, &buffer[firstseg],
543 cpipe->pipe_buffer.in);
544 } else {
545 bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
546 buffer, cnt);
547 }
548 }
549 pipe_free_kmem(cpipe);
550 cpipe->pipe_buffer.buffer = buffer;
551 cpipe->pipe_buffer.size = size;
552 cpipe->pipe_buffer.in = cnt;
553 cpipe->pipe_buffer.out = 0;
554 cpipe->pipe_buffer.cnt = cnt;
555 atomic_add_long(&amountpipekva, cpipe->pipe_buffer.size);
556 return (0);
557 }
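/*
 * The copy logic above linearizes the circular buffer: when the live
 * region wraps past the end (in <= out with cnt > 0), it is copied in
 * two segments so the data starts at offset 0 of the new buffer.  A
 * minimal standalone sketch of the same unwrap (illustrative only;
 * names are invented):
 *
 *	#include <string.h>
 *
 *	static void
 *	ring_linearize(const char *old, size_t size, size_t out, size_t cnt,
 *	    char *dst)
 *	{
 *		size_t firstseg = size - out;
 *
 *		if (cnt <= firstseg) {
 *			memcpy(dst, old + out, cnt);		no wraparound
 *		} else {
 *			memcpy(dst, old + out, firstseg);	tail segment
 *			memcpy(dst + firstseg, old, cnt - firstseg);
 *		}
 *	}
 */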
558
559 /*
560 * Wrapper for pipespace_new() that performs locking assertions.
561 */
562 static int
563 pipespace(cpipe, size)
564 struct pipe *cpipe;
565 int size;
566 {
567
568 KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
569 ("Unlocked pipe passed to pipespace"));
570 return (pipespace_new(cpipe, size));
571 }
572
573 /*
574 * lock a pipe for I/O, blocking other access
575 */
576 static __inline int
577 pipelock(cpipe, catch)
578 struct pipe *cpipe;
579 int catch;
580 {
581 int error;
582
583 PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
584 while (cpipe->pipe_state & PIPE_LOCKFL) {
585 cpipe->pipe_state |= PIPE_LWANT;
586 error = msleep(cpipe, PIPE_MTX(cpipe),
587 catch ? (PRIBIO | PCATCH) : PRIBIO,
588 "pipelk", 0);
589 if (error != 0)
590 return (error);
591 }
592 cpipe->pipe_state |= PIPE_LOCKFL;
593 return (0);
594 }
595
596 /*
597 * unlock a pipe I/O lock
598 */
599 static __inline void
600 pipeunlock(cpipe)
601 struct pipe *cpipe;
602 {
603
604 PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
605 KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
606 ("Unlocked pipe passed to pipeunlock"));
607 cpipe->pipe_state &= ~PIPE_LOCKFL;
608 if (cpipe->pipe_state & PIPE_LWANT) {
609 cpipe->pipe_state &= ~PIPE_LWANT;
610 wakeup(cpipe);
611 }
612 }
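/*
 * pipelock()/pipeunlock() above are a sleepable lock built from a flag
 * guarded by a short-term mutex.  A hedged userland analogue with
 * pthreads (invented names; initialization of the mutex and condition
 * variable is omitted):
 *
 *	#include <pthread.h>
 *
 *	struct flaglock {
 *		pthread_mutex_t	mtx;
 *		pthread_cond_t	cv;
 *		int		locked;		cf. PIPE_LOCKFL
 *	};
 *
 *	void
 *	flaglock_acquire(struct flaglock *fl)
 *	{
 *		pthread_mutex_lock(&fl->mtx);
 *		while (fl->locked)
 *			pthread_cond_wait(&fl->cv, &fl->mtx);	cf. "pipelk"
 *		fl->locked = 1;
 *		pthread_mutex_unlock(&fl->mtx);
 *	}
 *
 *	void
 *	flaglock_release(struct flaglock *fl)
 *	{
 *		pthread_mutex_lock(&fl->mtx);
 *		fl->locked = 0;
 *		pthread_cond_broadcast(&fl->cv);	cf. PIPE_LWANT wakeup
 *		pthread_mutex_unlock(&fl->mtx);
 *	}
 */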
613
614 void
615 pipeselwakeup(cpipe)
616 struct pipe *cpipe;
617 {
618
619 PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
620 if (cpipe->pipe_state & PIPE_SEL) {
621 selwakeuppri(&cpipe->pipe_sel, PSOCK);
622 if (!SEL_WAITING(&cpipe->pipe_sel))
623 cpipe->pipe_state &= ~PIPE_SEL;
624 }
625 if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
626 pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
627 KNOTE_LOCKED(&cpipe->pipe_sel.si_note, 0);
628 }
629
630 /*
631 * Initialize and allocate VM and memory for pipe. The structure
632 * will start out zero'd from the ctor, so we just manage the kmem.
633 */
634 static void
635 pipe_create(pipe, backing)
636 struct pipe *pipe;
637 int backing;
638 {
639
640 if (backing) {
641 /*
642 * Note that these functions can fail if the pipe map is exhausted
643 * (as a result of too many pipes created), but we ignore the
644 * error as it is not fatal and could be provoked by
645 * unprivileged users. The only consequence is worse performance
646 * with the given pipe.
647 */
648 if (amountpipekva > maxpipekva / 2)
649 (void)pipespace_new(pipe, SMALL_PIPE_SIZE);
650 else
651 (void)pipespace_new(pipe, PIPE_SIZE);
652 }
653
654 pipe->pipe_ino = -1;
655 }
656
657 /* ARGSUSED */
658 static int
659 pipe_read(fp, uio, active_cred, flags, td)
660 struct file *fp;
661 struct uio *uio;
662 struct ucred *active_cred;
663 struct thread *td;
664 int flags;
665 {
666 struct pipe *rpipe;
667 int error;
668 int nread = 0;
669 int size;
670
671 rpipe = fp->f_data;
672 PIPE_LOCK(rpipe);
673 ++rpipe->pipe_busy;
674 error = pipelock(rpipe, 1);
675 if (error)
676 goto unlocked_error;
677
678 #ifdef MAC
679 error = mac_pipe_check_read(active_cred, rpipe->pipe_pair);
680 if (error)
681 goto locked_error;
682 #endif
683 if (amountpipekva > (3 * maxpipekva) / 4) {
684 if (!(rpipe->pipe_state & PIPE_DIRECTW) &&
685 (rpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
686 (rpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
687 (piperesizeallowed == 1)) {
688 PIPE_UNLOCK(rpipe);
689 pipespace(rpipe, SMALL_PIPE_SIZE);
690 PIPE_LOCK(rpipe);
691 }
692 }
693
694 while (uio->uio_resid) {
695 /*
696 * normal pipe buffer receive
697 */
698 if (rpipe->pipe_buffer.cnt > 0) {
699 size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
700 if (size > rpipe->pipe_buffer.cnt)
701 size = rpipe->pipe_buffer.cnt;
702 if (size > uio->uio_resid)
703 size = uio->uio_resid;
704
705 PIPE_UNLOCK(rpipe);
706 error = uiomove(
707 &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
708 size, uio);
709 PIPE_LOCK(rpipe);
710 if (error)
711 break;
712
713 rpipe->pipe_buffer.out += size;
714 if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
715 rpipe->pipe_buffer.out = 0;
716
717 rpipe->pipe_buffer.cnt -= size;
718
719 /*
720 * If there is no more to read in the pipe, reset
721 * its pointers to the beginning. This improves
722 * cache hit stats.
723 */
724 if (rpipe->pipe_buffer.cnt == 0) {
725 rpipe->pipe_buffer.in = 0;
726 rpipe->pipe_buffer.out = 0;
727 }
728 nread += size;
729 #ifndef PIPE_NODIRECT
730 /*
731 * Direct copy, bypassing a kernel buffer.
732 */
733 } else if ((size = rpipe->pipe_map.cnt) &&
734 (rpipe->pipe_state & PIPE_DIRECTW)) {
735 if (size > uio->uio_resid)
736 size = (u_int) uio->uio_resid;
737
738 PIPE_UNLOCK(rpipe);
739 error = uiomove_fromphys(rpipe->pipe_map.ms,
740 rpipe->pipe_map.pos, size, uio);
741 PIPE_LOCK(rpipe);
742 if (error)
743 break;
744 nread += size;
745 rpipe->pipe_map.pos += size;
746 rpipe->pipe_map.cnt -= size;
747 if (rpipe->pipe_map.cnt == 0) {
748 rpipe->pipe_state &= ~(PIPE_DIRECTW|PIPE_WANTW);
749 wakeup(rpipe);
750 }
751 #endif
752 } else {
753 /*
754 * detect EOF condition
755 * read returns 0 on EOF, no need to set error
756 */
757 if (rpipe->pipe_state & PIPE_EOF)
758 break;
759
760 /*
761 * If the "write-side" has been blocked, wake it up now.
762 */
763 if (rpipe->pipe_state & PIPE_WANTW) {
764 rpipe->pipe_state &= ~PIPE_WANTW;
765 wakeup(rpipe);
766 }
767
768 /*
769 * Break if some data was read.
770 */
771 if (nread > 0)
772 break;
773
774 /*
775 * Unlock the pipe buffer for our remaining processing.
776 * We will either break out with an error or we will
777 * sleep and relock to loop.
778 */
779 pipeunlock(rpipe);
780
781 /*
782 * Handle non-blocking mode operation or
783 * wait for more data.
784 */
785 if (fp->f_flag & FNONBLOCK) {
786 error = EAGAIN;
787 } else {
788 rpipe->pipe_state |= PIPE_WANTR;
789 if ((error = msleep(rpipe, PIPE_MTX(rpipe),
790 PRIBIO | PCATCH,
791 "piperd", 0)) == 0)
792 error = pipelock(rpipe, 1);
793 }
794 if (error)
795 goto unlocked_error;
796 }
797 }
798 #ifdef MAC
799 locked_error:
800 #endif
801 pipeunlock(rpipe);
802
803 /* XXX: should probably do this before getting any locks. */
804 if (error == 0)
805 vfs_timestamp(&rpipe->pipe_atime);
806 unlocked_error:
807 --rpipe->pipe_busy;
808
809 /*
810 * PIPE_WANT processing only makes sense if pipe_busy is 0.
811 */
812 if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
813 rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
814 wakeup(rpipe);
815 } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
816 /*
817 * Handle write blocking hysteresis.
818 */
819 if (rpipe->pipe_state & PIPE_WANTW) {
820 rpipe->pipe_state &= ~PIPE_WANTW;
821 wakeup(rpipe);
822 }
823 }
824
825 if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
826 pipeselwakeup(rpipe);
827
828 PIPE_UNLOCK(rpipe);
829 return (error);
830 }
831
832 #ifndef PIPE_NODIRECT
833 /*
834 * Map the sending process's buffer into kernel space and wire it.
835 * This is similar to a physical write operation.
836 */
837 static int
838 pipe_build_write_buffer(wpipe, uio)
839 struct pipe *wpipe;
840 struct uio *uio;
841 {
842 u_int size;
843 int i;
844
845 PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
846 KASSERT(wpipe->pipe_state & PIPE_DIRECTW,
847 ("Clone attempt on non-direct write pipe!"));
848
849 if (uio->uio_iov->iov_len > wpipe->pipe_buffer.size)
850 size = wpipe->pipe_buffer.size;
851 else
852 size = uio->uio_iov->iov_len;
853
854 if ((i = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
855 (vm_offset_t)uio->uio_iov->iov_base, size, VM_PROT_READ,
856 wpipe->pipe_map.ms, PIPENPAGES)) < 0)
857 return (EFAULT);
858
859 /*
860 * set up the control block
861 */
862 wpipe->pipe_map.npages = i;
863 wpipe->pipe_map.pos =
864 ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
865 wpipe->pipe_map.cnt = size;
866
867 /*
868 * and update the uio data
869 */
870
871 uio->uio_iov->iov_len -= size;
872 uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
873 if (uio->uio_iov->iov_len == 0)
874 uio->uio_iov++;
875 uio->uio_resid -= size;
876 uio->uio_offset += size;
877 return (0);
878 }
879
880 /*
881 * unmap and unwire the process buffer
882 */
883 static void
884 pipe_destroy_write_buffer(wpipe)
885 struct pipe *wpipe;
886 {
887
888 PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
889 vm_page_unhold_pages(wpipe->pipe_map.ms, wpipe->pipe_map.npages);
890 wpipe->pipe_map.npages = 0;
891 }
892
893 /*
894 * In the case of a signal, the writing process might go away. This
895 * code copies the data into the circular buffer so that the source
896 * pages can be freed without loss of data.
897 */
898 static void
899 pipe_clone_write_buffer(wpipe)
900 struct pipe *wpipe;
901 {
902 struct uio uio;
903 struct iovec iov;
904 int size;
905 int pos;
906
907 PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
908 size = wpipe->pipe_map.cnt;
909 pos = wpipe->pipe_map.pos;
910
911 wpipe->pipe_buffer.in = size;
912 wpipe->pipe_buffer.out = 0;
913 wpipe->pipe_buffer.cnt = size;
914 wpipe->pipe_state &= ~PIPE_DIRECTW;
915
916 PIPE_UNLOCK(wpipe);
917 iov.iov_base = wpipe->pipe_buffer.buffer;
918 iov.iov_len = size;
919 uio.uio_iov = &iov;
920 uio.uio_iovcnt = 1;
921 uio.uio_offset = 0;
922 uio.uio_resid = size;
923 uio.uio_segflg = UIO_SYSSPACE;
924 uio.uio_rw = UIO_READ;
925 uio.uio_td = curthread;
926 uiomove_fromphys(wpipe->pipe_map.ms, pos, size, &uio);
927 PIPE_LOCK(wpipe);
928 pipe_destroy_write_buffer(wpipe);
929 }
930
931 /*
932 * This implements the pipe buffer write mechanism. Note that only
933 * a direct write OR a normal pipe write can be pending at any given time.
934 * If there are any characters in the pipe buffer, the direct write will
935 * be deferred until the receiving process grabs all of the bytes from
936 * the pipe buffer. Then the direct mapping write is set-up.
937 */
938 static int
939 pipe_direct_write(wpipe, uio)
940 struct pipe *wpipe;
941 struct uio *uio;
942 {
943 int error;
944
945 retry:
946 PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
947 error = pipelock(wpipe, 1);
948 if (wpipe->pipe_state & PIPE_EOF)
949 error = EPIPE;
950 if (error) {
951 pipeunlock(wpipe);
952 goto error1;
953 }
954 while (wpipe->pipe_state & PIPE_DIRECTW) {
955 if (wpipe->pipe_state & PIPE_WANTR) {
956 wpipe->pipe_state &= ~PIPE_WANTR;
957 wakeup(wpipe);
958 }
959 pipeselwakeup(wpipe);
960 wpipe->pipe_state |= PIPE_WANTW;
961 pipeunlock(wpipe);
962 error = msleep(wpipe, PIPE_MTX(wpipe),
963 PRIBIO | PCATCH, "pipdww", 0);
964 if (error)
965 goto error1;
966 else
967 goto retry;
968 }
969 wpipe->pipe_map.cnt = 0; /* transfer not ready yet */
970 if (wpipe->pipe_buffer.cnt > 0) {
971 if (wpipe->pipe_state & PIPE_WANTR) {
972 wpipe->pipe_state &= ~PIPE_WANTR;
973 wakeup(wpipe);
974 }
975 pipeselwakeup(wpipe);
976 wpipe->pipe_state |= PIPE_WANTW;
977 pipeunlock(wpipe);
978 error = msleep(wpipe, PIPE_MTX(wpipe),
979 PRIBIO | PCATCH, "pipdwc", 0);
980 if (error)
981 goto error1;
982 else
983 goto retry;
984 }
985
986 wpipe->pipe_state |= PIPE_DIRECTW;
987
988 PIPE_UNLOCK(wpipe);
989 error = pipe_build_write_buffer(wpipe, uio);
990 PIPE_LOCK(wpipe);
991 if (error) {
992 wpipe->pipe_state &= ~PIPE_DIRECTW;
993 pipeunlock(wpipe);
994 goto error1;
995 }
996
997 error = 0;
998 while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
999 if (wpipe->pipe_state & PIPE_EOF) {
1000 pipe_destroy_write_buffer(wpipe);
1001 pipeselwakeup(wpipe);
1002 pipeunlock(wpipe);
1003 error = EPIPE;
1004 goto error1;
1005 }
1006 if (wpipe->pipe_state & PIPE_WANTR) {
1007 wpipe->pipe_state &= ~PIPE_WANTR;
1008 wakeup(wpipe);
1009 }
1010 pipeselwakeup(wpipe);
1011 wpipe->pipe_state |= PIPE_WANTW;
1012 pipeunlock(wpipe);
1013 error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
1014 "pipdwt", 0);
1015 pipelock(wpipe, 0);
1016 }
1017
1018 if (wpipe->pipe_state & PIPE_EOF)
1019 error = EPIPE;
1020 if (wpipe->pipe_state & PIPE_DIRECTW) {
1021 /*
1022 * this bit of trickery substitutes a kernel buffer for
1023 * the process that might be going away.
1024 */
1025 pipe_clone_write_buffer(wpipe);
1026 } else {
1027 pipe_destroy_write_buffer(wpipe);
1028 }
1029 pipeunlock(wpipe);
1030 return (error);
1031
1032 error1:
1033 wakeup(wpipe);
1034 return (error);
1035 }
1036 #endif
1037
1038 static int
1039 pipe_write(fp, uio, active_cred, flags, td)
1040 struct file *fp;
1041 struct uio *uio;
1042 struct ucred *active_cred;
1043 struct thread *td;
1044 int flags;
1045 {
1046 int error = 0;
1047 int desiredsize;
1048 ssize_t orig_resid;
1049 struct pipe *wpipe, *rpipe;
1050
1051 rpipe = fp->f_data;
1052 wpipe = PIPE_PEER(rpipe);
1053 PIPE_LOCK(rpipe);
1054 error = pipelock(wpipe, 1);
1055 if (error) {
1056 PIPE_UNLOCK(rpipe);
1057 return (error);
1058 }
1059 /*
1060 * detect loss of pipe read side, issue SIGPIPE if lost.
1061 */
1062 if (wpipe->pipe_present != PIPE_ACTIVE ||
1063 (wpipe->pipe_state & PIPE_EOF)) {
1064 pipeunlock(wpipe);
1065 PIPE_UNLOCK(rpipe);
1066 return (EPIPE);
1067 }
1068 #ifdef MAC
1069 error = mac_pipe_check_write(active_cred, wpipe->pipe_pair);
1070 if (error) {
1071 pipeunlock(wpipe);
1072 PIPE_UNLOCK(rpipe);
1073 return (error);
1074 }
1075 #endif
1076 ++wpipe->pipe_busy;
1077
1078 /* Choose a larger size if it's advantageous */
1079 desiredsize = max(SMALL_PIPE_SIZE, wpipe->pipe_buffer.size);
1080 while (desiredsize < wpipe->pipe_buffer.cnt + uio->uio_resid) {
1081 if (piperesizeallowed != 1)
1082 break;
1083 if (amountpipekva > maxpipekva / 2)
1084 break;
1085 if (desiredsize == BIG_PIPE_SIZE)
1086 break;
1087 desiredsize = desiredsize * 2;
1088 }
1089
1090 /* Choose a smaller size if we're in an OOM situation */
1091 if ((amountpipekva > (3 * maxpipekva) / 4) &&
1092 (wpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
1093 (wpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
1094 (piperesizeallowed == 1))
1095 desiredsize = SMALL_PIPE_SIZE;
1096
1097 /* Resize if the above determined that a new size was necessary */
1098 if ((desiredsize != wpipe->pipe_buffer.size) &&
1099 ((wpipe->pipe_state & PIPE_DIRECTW) == 0)) {
1100 PIPE_UNLOCK(wpipe);
1101 pipespace(wpipe, desiredsize);
1102 PIPE_LOCK(wpipe);
1103 }
1104 if (wpipe->pipe_buffer.size == 0) {
1105 /*
1106 * This can only happen for reverse direction use of pipes
1107 * in a complete OOM situation.
1108 */
1109 error = ENOMEM;
1110 --wpipe->pipe_busy;
1111 pipeunlock(wpipe);
1112 PIPE_UNLOCK(wpipe);
1113 return (error);
1114 }
1115
1116 pipeunlock(wpipe);
1117
1118 orig_resid = uio->uio_resid;
1119
1120 while (uio->uio_resid) {
1121 int space;
1122
1123 pipelock(wpipe, 0);
1124 if (wpipe->pipe_state & PIPE_EOF) {
1125 pipeunlock(wpipe);
1126 error = EPIPE;
1127 break;
1128 }
1129 #ifndef PIPE_NODIRECT
1130 /*
1131 * If the transfer is large, we can gain performance if
1132 * we do process-to-process copies directly.
1133 * If the write is non-blocking, we don't use the
1134 * direct write mechanism.
1135 *
1136 * The direct write mechanism will detect the reader going
1137 * away on us.
1138 */
1139 if (uio->uio_segflg == UIO_USERSPACE &&
1140 uio->uio_iov->iov_len >= PIPE_MINDIRECT &&
1141 wpipe->pipe_buffer.size >= PIPE_MINDIRECT &&
1142 (fp->f_flag & FNONBLOCK) == 0) {
1143 pipeunlock(wpipe);
1144 error = pipe_direct_write(wpipe, uio);
1145 if (error)
1146 break;
1147 continue;
1148 }
1149 #endif
1150
1151 /*
1152 * Pipe buffered writes cannot be coincident with
1153 * direct writes. We wait until the currently executing
1154 * direct write is completed before we start filling the
1155 * pipe buffer. We break out if a signal occurs or the
1156 * reader goes away.
1157 */
1158 if (wpipe->pipe_state & PIPE_DIRECTW) {
1159 if (wpipe->pipe_state & PIPE_WANTR) {
1160 wpipe->pipe_state &= ~PIPE_WANTR;
1161 wakeup(wpipe);
1162 }
1163 pipeselwakeup(wpipe);
1164 wpipe->pipe_state |= PIPE_WANTW;
1165 pipeunlock(wpipe);
1166 error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
1167 "pipbww", 0);
1168 if (error)
1169 break;
1170 else
1171 continue;
1172 }
1173
1174 space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1175
1176 /* Writes of size <= PIPE_BUF must be atomic. */
1177 if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
1178 space = 0;
1179
1180 if (space > 0) {
1181 int size; /* Transfer size */
1182 int segsize; /* first segment to transfer */
1183
1184 /*
1185 * Transfer size is minimum of uio transfer
1186 * and free space in pipe buffer.
1187 */
1188 if (space > uio->uio_resid)
1189 size = uio->uio_resid;
1190 else
1191 size = space;
1192 /*
1193 * First segment to transfer is minimum of
1194 * transfer size and contiguous space in
1195 * pipe buffer. If first segment to transfer
1196 * is less than the transfer size, we've got
1197 * a wraparound in the buffer.
1198 */
1199 segsize = wpipe->pipe_buffer.size -
1200 wpipe->pipe_buffer.in;
1201 if (segsize > size)
1202 segsize = size;
1203
1204 /* Transfer first segment */
1205
1206 PIPE_UNLOCK(rpipe);
1207 error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
1208 segsize, uio);
1209 PIPE_LOCK(rpipe);
1210
1211 if (error == 0 && segsize < size) {
1212 KASSERT(wpipe->pipe_buffer.in + segsize ==
1213 wpipe->pipe_buffer.size,
1214 ("Pipe buffer wraparound disappeared"));
1215 /*
1216 * Transfer remaining part now, to
1217 * support atomic writes. Wraparound
1218 * happened.
1219 */
1220
1221 PIPE_UNLOCK(rpipe);
1222 error = uiomove(
1223 &wpipe->pipe_buffer.buffer[0],
1224 size - segsize, uio);
1225 PIPE_LOCK(rpipe);
1226 }
1227 if (error == 0) {
1228 wpipe->pipe_buffer.in += size;
1229 if (wpipe->pipe_buffer.in >=
1230 wpipe->pipe_buffer.size) {
1231 KASSERT(wpipe->pipe_buffer.in ==
1232 size - segsize +
1233 wpipe->pipe_buffer.size,
1234 ("Expected wraparound bad"));
1235 wpipe->pipe_buffer.in = size - segsize;
1236 }
1237
1238 wpipe->pipe_buffer.cnt += size;
1239 KASSERT(wpipe->pipe_buffer.cnt <=
1240 wpipe->pipe_buffer.size,
1241 ("Pipe buffer overflow"));
1242 }
1243 pipeunlock(wpipe);
1244 if (error != 0)
1245 break;
1246 } else {
1247 /*
1248 * If the "read-side" has been blocked, wake it up now.
1249 */
1250 if (wpipe->pipe_state & PIPE_WANTR) {
1251 wpipe->pipe_state &= ~PIPE_WANTR;
1252 wakeup(wpipe);
1253 }
1254
1255 /*
1256 * don't block on non-blocking I/O
1257 */
1258 if (fp->f_flag & FNONBLOCK) {
1259 error = EAGAIN;
1260 pipeunlock(wpipe);
1261 break;
1262 }
1263
1264 /*
1265 * We have no more space and have something to offer,
1266 * wake up select/poll.
1267 */
1268 pipeselwakeup(wpipe);
1269
1270 wpipe->pipe_state |= PIPE_WANTW;
1271 pipeunlock(wpipe);
1272 error = msleep(wpipe, PIPE_MTX(rpipe),
1273 PRIBIO | PCATCH, "pipewr", 0);
1274 if (error != 0)
1275 break;
1276 }
1277 }
1278
1279 pipelock(wpipe, 0);
1280 --wpipe->pipe_busy;
1281
1282 if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
1283 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
1284 wakeup(wpipe);
1285 } else if (wpipe->pipe_buffer.cnt > 0) {
1286 /*
1287 * If we have put any characters in the buffer, we wake up
1288 * the reader.
1289 */
1290 if (wpipe->pipe_state & PIPE_WANTR) {
1291 wpipe->pipe_state &= ~PIPE_WANTR;
1292 wakeup(wpipe);
1293 }
1294 }
1295
1296 /*
1297 * Don't return EPIPE if any byte was written.
1298 * EINTR and other interrupts are handled by the generic I/O layer.
1299 * Do not pretend that I/O succeeded for obvious user error
1300 * like EFAULT.
1301 */
1302 if (uio->uio_resid != orig_resid && error == EPIPE)
1303 error = 0;
1304
1305 if (error == 0)
1306 vfs_timestamp(&wpipe->pipe_mtime);
1307
1308 /*
1309 * We have something to offer,
1310 * wake up select/poll.
1311 */
1312 if (wpipe->pipe_buffer.cnt)
1313 pipeselwakeup(wpipe);
1314
1315 pipeunlock(wpipe);
1316 PIPE_UNLOCK(rpipe);
1317 return (error);
1318 }
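/*
 * The "space = 0" clamp above (for requests of at most PIPE_BUF bytes)
 * implements the POSIX guarantee that small pipe writes are atomic:
 * the writer sleeps until the whole request fits instead of storing a
 * partial record.  A hedged userland illustration (invented names):
 *
 *	#include <limits.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static void
 *	log_record(int wfd, const char *rec)
 *	{
 *		size_t len = strlen(rec);
 *
 *		if (len <= PIPE_BUF)			never interleaved with
 *			(void)write(wfd, rec, len);	other writers' records
 *	}
 */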
1319
1320 /* ARGSUSED */
1321 static int
1322 pipe_truncate(fp, length, active_cred, td)
1323 struct file *fp;
1324 off_t length;
1325 struct ucred *active_cred;
1326 struct thread *td;
1327 {
1328
1329 /* For named pipes call the vnode operation. */
1330 if (fp->f_vnode != NULL)
1331 return (vnops.fo_truncate(fp, length, active_cred, td));
1332 return (EINVAL);
1333 }
1334
1335 /*
1336 * we implement a very minimal set of ioctls for compatibility with sockets.
1337 */
1338 static int
1339 pipe_ioctl(fp, cmd, data, active_cred, td)
1340 struct file *fp;
1341 u_long cmd;
1342 void *data;
1343 struct ucred *active_cred;
1344 struct thread *td;
1345 {
1346 struct pipe *mpipe = fp->f_data;
1347 int error;
1348
1349 PIPE_LOCK(mpipe);
1350
1351 #ifdef MAC
1352 error = mac_pipe_check_ioctl(active_cred, mpipe->pipe_pair, cmd, data);
1353 if (error) {
1354 PIPE_UNLOCK(mpipe);
1355 return (error);
1356 }
1357 #endif
1358
1359 error = 0;
1360 switch (cmd) {
1361
1362 case FIONBIO:
1363 break;
1364
1365 case FIOASYNC:
1366 if (*(int *)data) {
1367 mpipe->pipe_state |= PIPE_ASYNC;
1368 } else {
1369 mpipe->pipe_state &= ~PIPE_ASYNC;
1370 }
1371 break;
1372
1373 case FIONREAD:
1374 if (!(fp->f_flag & FREAD)) {
1375 *(int *)data = 0;
1376 PIPE_UNLOCK(mpipe);
1377 return (0);
1378 }
1379 if (mpipe->pipe_state & PIPE_DIRECTW)
1380 *(int *)data = mpipe->pipe_map.cnt;
1381 else
1382 *(int *)data = mpipe->pipe_buffer.cnt;
1383 break;
1384
1385 case FIOSETOWN:
1386 PIPE_UNLOCK(mpipe);
1387 error = fsetown(*(int *)data, &mpipe->pipe_sigio);
1388 goto out_unlocked;
1389
1390 case FIOGETOWN:
1391 *(int *)data = fgetown(&mpipe->pipe_sigio);
1392 break;
1393
1394 /* This is deprecated, FIOSETOWN should be used instead. */
1395 case TIOCSPGRP:
1396 PIPE_UNLOCK(mpipe);
1397 error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
1398 goto out_unlocked;
1399
1400 /* This is deprecated, FIOGETOWN should be used instead. */
1401 case TIOCGPGRP:
1402 *(int *)data = -fgetown(&mpipe->pipe_sigio);
1403 break;
1404
1405 default:
1406 error = ENOTTY;
1407 break;
1408 }
1409 PIPE_UNLOCK(mpipe);
1410 out_unlocked:
1411 return (error);
1412 }
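/*
 * Illustrative userland sketch (hypothetical): the FIONREAD case above
 * reports how many bytes are currently queued in the pipe.
 *
 *	#include <sys/ioctl.h>
 *
 *	int
 *	pipe_bytes_queued(int rfd)
 *	{
 *		int n;
 *
 *		return (ioctl(rfd, FIONREAD, &n) == -1 ? -1 : n);
 *	}
 */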
1413
1414 static int
1415 pipe_poll(fp, events, active_cred, td)
1416 struct file *fp;
1417 int events;
1418 struct ucred *active_cred;
1419 struct thread *td;
1420 {
1421 struct pipe *rpipe;
1422 struct pipe *wpipe;
1423 int levents, revents;
1424 #ifdef MAC
1425 int error;
1426 #endif
1427
1428 revents = 0;
1429 rpipe = fp->f_data;
1430 wpipe = PIPE_PEER(rpipe);
1431 PIPE_LOCK(rpipe);
1432 #ifdef MAC
1433 error = mac_pipe_check_poll(active_cred, rpipe->pipe_pair);
1434 if (error)
1435 goto locked_error;
1436 #endif
1437 if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM))
1438 if ((rpipe->pipe_state & PIPE_DIRECTW) ||
1439 (rpipe->pipe_buffer.cnt > 0))
1440 revents |= events & (POLLIN | POLLRDNORM);
1441
1442 if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM))
1443 if (wpipe->pipe_present != PIPE_ACTIVE ||
1444 (wpipe->pipe_state & PIPE_EOF) ||
1445 (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
1446 ((wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF ||
1447 wpipe->pipe_buffer.size == 0)))
1448 revents |= events & (POLLOUT | POLLWRNORM);
1449
1450 levents = events &
1451 (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM | POLLRDBAND);
1452 if (rpipe->pipe_state & PIPE_NAMED && fp->f_flag & FREAD && levents &&
1453 fp->f_seqcount == rpipe->pipe_wgen)
1454 events |= POLLINIGNEOF;
1455
1456 if ((events & POLLINIGNEOF) == 0) {
1457 if (rpipe->pipe_state & PIPE_EOF) {
1458 revents |= (events & (POLLIN | POLLRDNORM));
1459 if (wpipe->pipe_present != PIPE_ACTIVE ||
1460 (wpipe->pipe_state & PIPE_EOF))
1461 revents |= POLLHUP;
1462 }
1463 }
1464
1465 if (revents == 0) {
1466 if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM)) {
1467 selrecord(td, &rpipe->pipe_sel);
1468 if (SEL_WAITING(&rpipe->pipe_sel))
1469 rpipe->pipe_state |= PIPE_SEL;
1470 }
1471
1472 if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM)) {
1473 selrecord(td, &wpipe->pipe_sel);
1474 if (SEL_WAITING(&wpipe->pipe_sel))
1475 wpipe->pipe_state |= PIPE_SEL;
1476 }
1477 }
1478 #ifdef MAC
1479 locked_error:
1480 #endif
1481 PIPE_UNLOCK(rpipe);
1482
1483 return (revents);
1484 }
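/*
 * Illustrative userland sketch (hypothetical): per the logic above, the
 * write end polls writable once at least PIPE_BUF bytes of space are
 * free, and the read end reports POLLHUP only after the write side is
 * completely gone.
 *
 *	#include <poll.h>
 *
 *	int
 *	pipe_wait_ready(int rfd, int wfd)
 *	{
 *		struct pollfd pfd[2] = {
 *			{ .fd = rfd, .events = POLLIN },
 *			{ .fd = wfd, .events = POLLOUT },
 *		};
 *
 *		return (poll(pfd, 2, -1));	blocks until one end is ready
 *	}
 */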
1485
1486 /*
1487 * We shouldn't need locks here as we're doing a read and this should
1488 * be a natural race.
1489 */
1490 static int
1491 pipe_stat(fp, ub, active_cred, td)
1492 struct file *fp;
1493 struct stat *ub;
1494 struct ucred *active_cred;
1495 struct thread *td;
1496 {
1497 struct pipe *pipe;
1498 int new_unr;
1499 #ifdef MAC
1500 int error;
1501 #endif
1502
1503 pipe = fp->f_data;
1504 PIPE_LOCK(pipe);
1505 #ifdef MAC
1506 error = mac_pipe_check_stat(active_cred, pipe->pipe_pair);
1507 if (error) {
1508 PIPE_UNLOCK(pipe);
1509 return (error);
1510 }
1511 #endif
1512
1513 /* For named pipes ask the underlying filesystem. */
1514 if (pipe->pipe_state & PIPE_NAMED) {
1515 PIPE_UNLOCK(pipe);
1516 return (vnops.fo_stat(fp, ub, active_cred, td));
1517 }
1518
1519 /*
1520 * Lazily allocate an inode number for the pipe. Most pipe
1521 * users do not call fstat(2) on the pipe, which means that
1522 * postponing the inode allocation until it must be
1523 * returned to userland is useful. If alloc_unr failed,
1524 * assign st_ino zero instead of returning an error.
1525 * Special pipe_ino values:
1526 * -1 - not yet initialized;
1527 * 0 - alloc_unr failed, return 0 as st_ino forever.
1528 */
1529 if (pipe->pipe_ino == (ino_t)-1) {
1530 new_unr = alloc_unr(pipeino_unr);
1531 if (new_unr != -1)
1532 pipe->pipe_ino = new_unr;
1533 else
1534 pipe->pipe_ino = 0;
1535 }
1536 PIPE_UNLOCK(pipe);
1537
1538 bzero(ub, sizeof(*ub));
1539 ub->st_mode = S_IFIFO;
1540 ub->st_blksize = PAGE_SIZE;
1541 if (pipe->pipe_state & PIPE_DIRECTW)
1542 ub->st_size = pipe->pipe_map.cnt;
1543 else
1544 ub->st_size = pipe->pipe_buffer.cnt;
1545 ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
1546 ub->st_atim = pipe->pipe_atime;
1547 ub->st_mtim = pipe->pipe_mtime;
1548 ub->st_ctim = pipe->pipe_ctime;
1549 ub->st_uid = fp->f_cred->cr_uid;
1550 ub->st_gid = fp->f_cred->cr_gid;
1551 ub->st_dev = pipedev_ino;
1552 ub->st_ino = pipe->pipe_ino;
1553 /*
1554 * Left as 0: st_nlink, st_rdev, st_flags, st_gen.
1555 */
1556 return (0);
1557 }
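/*
 * Illustrative userland sketch (hypothetical): fstat(2) on an anonymous
 * pipe reports S_IFIFO, the queued byte count as st_size, and the
 * lazily allocated inode number described above.
 *
 *	#include <sys/stat.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	void
 *	show_pipe_stat(int fd)
 *	{
 *		struct stat sb;
 *
 *		if (fstat(fd, &sb) == 0 && S_ISFIFO(sb.st_mode))
 *			printf("ino %ju, %jd bytes queued\n",
 *			    (uintmax_t)sb.st_ino, (intmax_t)sb.st_size);
 *	}
 */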
1558
1559 /* ARGSUSED */
1560 static int
1561 pipe_close(fp, td)
1562 struct file *fp;
1563 struct thread *td;
1564 {
1565
1566 if (fp->f_vnode != NULL)
1567 return vnops.fo_close(fp, td);
1568 fp->f_ops = &badfileops;
1569 pipe_dtor(fp->f_data);
1570 fp->f_data = NULL;
1571 return (0);
1572 }
1573
1574 static int
1575 pipe_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, struct thread *td)
1576 {
1577 struct pipe *cpipe;
1578 int error;
1579
1580 cpipe = fp->f_data;
1581 if (cpipe->pipe_state & PIPE_NAMED)
1582 error = vn_chmod(fp, mode, active_cred, td);
1583 else
1584 error = invfo_chmod(fp, mode, active_cred, td);
1585 return (error);
1586 }
1587
1588 static int
1589 pipe_chown(fp, uid, gid, active_cred, td)
1590 struct file *fp;
1591 uid_t uid;
1592 gid_t gid;
1593 struct ucred *active_cred;
1594 struct thread *td;
1595 {
1596 struct pipe *cpipe;
1597 int error;
1598
1599 cpipe = fp->f_data;
1600 if (cpipe->pipe_state & PIPE_NAMED)
1601 error = vn_chown(fp, uid, gid, active_cred, td);
1602 else
1603 error = invfo_chown(fp, uid, gid, active_cred, td);
1604 return (error);
1605 }
1606
1607 static void
1608 pipe_free_kmem(cpipe)
1609 struct pipe *cpipe;
1610 {
1611
1612 KASSERT(!mtx_owned(PIPE_MTX(cpipe)),
1613 ("pipe_free_kmem: pipe mutex locked"));
1614
1615 if (cpipe->pipe_buffer.buffer != NULL) {
1616 atomic_subtract_long(&amountpipekva, cpipe->pipe_buffer.size);
1617 vm_map_remove(pipe_map,
1618 (vm_offset_t)cpipe->pipe_buffer.buffer,
1619 (vm_offset_t)cpipe->pipe_buffer.buffer + cpipe->pipe_buffer.size);
1620 cpipe->pipe_buffer.buffer = NULL;
1621 }
1622 #ifndef PIPE_NODIRECT
1623 {
1624 cpipe->pipe_map.cnt = 0;
1625 cpipe->pipe_map.pos = 0;
1626 cpipe->pipe_map.npages = 0;
1627 }
1628 #endif
1629 }
1630
1631 /*
1632 * shut down the pipe
1633 */
1634 static void
1635 pipeclose(cpipe)
1636 struct pipe *cpipe;
1637 {
1638 struct pipepair *pp;
1639 struct pipe *ppipe;
1640
1641 KASSERT(cpipe != NULL, ("pipeclose: cpipe == NULL"));
1642
1643 PIPE_LOCK(cpipe);
1644 pipelock(cpipe, 0);
1645 pp = cpipe->pipe_pair;
1646
1647 pipeselwakeup(cpipe);
1648
1649 /*
1650 * If the other side is blocked, wake it up saying that
1651 * we want to close it down.
1652 */
1653 cpipe->pipe_state |= PIPE_EOF;
1654 while (cpipe->pipe_busy) {
1655 wakeup(cpipe);
1656 cpipe->pipe_state |= PIPE_WANT;
1657 pipeunlock(cpipe);
1658 msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
1659 pipelock(cpipe, 0);
1660 }
1661
1662
1663 /*
1664 * Disconnect from peer, if any.
1665 */
1666 ppipe = cpipe->pipe_peer;
1667 if (ppipe->pipe_present == PIPE_ACTIVE) {
1668 pipeselwakeup(ppipe);
1669
1670 ppipe->pipe_state |= PIPE_EOF;
1671 wakeup(ppipe);
1672 KNOTE_LOCKED(&ppipe->pipe_sel.si_note, 0);
1673 }
1674
1675 /*
1676 * Mark this endpoint as free. Release kmem resources. We
1677 * don't mark this endpoint as unused until we've finished
1678 * doing that, or the pipe might disappear out from under
1679 * us.
1680 */
1681 PIPE_UNLOCK(cpipe);
1682 pipe_free_kmem(cpipe);
1683 PIPE_LOCK(cpipe);
1684 cpipe->pipe_present = PIPE_CLOSING;
1685 pipeunlock(cpipe);
1686
1687 /*
1688 * knlist_clear() may sleep dropping the PIPE_MTX. Set
1689 * PIPE_FINALIZED, which allows the other end to free the
1690 * pipe_pair, only after the knotes are completely dismantled.
1691 */
1692 knlist_clear(&cpipe->pipe_sel.si_note, 1);
1693 cpipe->pipe_present = PIPE_FINALIZED;
1694 seldrain(&cpipe->pipe_sel);
1695 knlist_destroy(&cpipe->pipe_sel.si_note);
1696
1697 /*
1698 * If both endpoints are now closed, release the memory for the
1699 * pipe pair. If not, unlock.
1700 */
1701 if (ppipe->pipe_present == PIPE_FINALIZED) {
1702 PIPE_UNLOCK(cpipe);
1703 #ifdef MAC
1704 mac_pipe_destroy(pp);
1705 #endif
1706 uma_zfree(pipe_zone, cpipe->pipe_pair);
1707 } else
1708 PIPE_UNLOCK(cpipe);
1709 }
1710
1711 /*ARGSUSED*/
1712 static int
1713 pipe_kqfilter(struct file *fp, struct knote *kn)
1714 {
1715 struct pipe *cpipe;
1716
1717 /*
1718 * If a filter is requested that is not supported by this file
1719 * descriptor, don't return an error, but also don't ever generate an
1720 * event.
1721 */
1722 if ((kn->kn_filter == EVFILT_READ) && !(fp->f_flag & FREAD)) {
1723 kn->kn_fop = &pipe_nfiltops;
1724 return (0);
1725 }
1726 if ((kn->kn_filter == EVFILT_WRITE) && !(fp->f_flag & FWRITE)) {
1727 kn->kn_fop = &pipe_nfiltops;
1728 return (0);
1729 }
1730 cpipe = fp->f_data;
1731 PIPE_LOCK(cpipe);
1732 switch (kn->kn_filter) {
1733 case EVFILT_READ:
1734 kn->kn_fop = &pipe_rfiltops;
1735 break;
1736 case EVFILT_WRITE:
1737 kn->kn_fop = &pipe_wfiltops;
1738 if (cpipe->pipe_peer->pipe_present != PIPE_ACTIVE) {
1739 /* other end of pipe has been closed */
1740 PIPE_UNLOCK(cpipe);
1741 return (EPIPE);
1742 }
1743 cpipe = PIPE_PEER(cpipe);
1744 break;
1745 default:
1746 PIPE_UNLOCK(cpipe);
1747 return (EINVAL);
1748 }
1749
1750 kn->kn_hook = cpipe;
1751 knlist_add(&cpipe->pipe_sel.si_note, kn, 1);
1752 PIPE_UNLOCK(cpipe);
1753 return (0);
1754 }
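/*
 * Illustrative userland sketch (hypothetical) of the filters registered
 * above: EVFILT_READ on the read end fires with kn_data bytes readable;
 * EVFILT_WRITE on the write end fires once at least PIPE_BUF bytes of
 * buffer space are available.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *	#include <err.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	void
 *	wait_readable(int kq, int pipe_rfd)
 *	{
 *		struct kevent kev;
 *
 *		EV_SET(&kev, pipe_rfd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *		if (kevent(kq, &kev, 1, &kev, 1, NULL) == -1)
 *			err(1, "kevent");
 *		printf("%jd bytes readable\n", (intmax_t)kev.data);
 *	}
 */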
1755
1756 static void
1757 filt_pipedetach(struct knote *kn)
1758 {
1759 struct pipe *cpipe = kn->kn_hook;
1760
1761 PIPE_LOCK(cpipe);
1762 knlist_remove(&cpipe->pipe_sel.si_note, kn, 1);
1763 PIPE_UNLOCK(cpipe);
1764 }
1765
1766 /*ARGSUSED*/
1767 static int
1768 filt_piperead(struct knote *kn, long hint)
1769 {
1770 struct pipe *rpipe = kn->kn_hook;
1771 struct pipe *wpipe = rpipe->pipe_peer;
1772 int ret;
1773
1774 PIPE_LOCK_ASSERT(rpipe, MA_OWNED);
1775 kn->kn_data = rpipe->pipe_buffer.cnt;
1776 if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
1777 kn->kn_data = rpipe->pipe_map.cnt;
1778
1779 if ((rpipe->pipe_state & PIPE_EOF) ||
1780 wpipe->pipe_present != PIPE_ACTIVE ||
1781 (wpipe->pipe_state & PIPE_EOF)) {
1782 kn->kn_flags |= EV_EOF;
1783 return (1);
1784 }
1785 ret = kn->kn_data > 0;
1786 return ret;
1787 }
1788
1789 /*ARGSUSED*/
1790 static int
1791 filt_pipewrite(struct knote *kn, long hint)
1792 {
1793 struct pipe *wpipe;
1794
1795 wpipe = kn->kn_hook;
1796 PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
1797 if (wpipe->pipe_present != PIPE_ACTIVE ||
1798 (wpipe->pipe_state & PIPE_EOF)) {
1799 kn->kn_data = 0;
1800 kn->kn_flags |= EV_EOF;
1801 return (1);
1802 }
1803 kn->kn_data = (wpipe->pipe_buffer.size > 0) ?
1804 (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) : PIPE_BUF;
1805 if (wpipe->pipe_state & PIPE_DIRECTW)
1806 kn->kn_data = 0;
1807
1808 return (kn->kn_data >= PIPE_BUF);
1809 }
1810
1811 static void
1812 filt_pipedetach_notsup(struct knote *kn)
1813 {
1814
1815 }
1816
1817 static int
1818 filt_pipenotsup(struct knote *kn, long hint)
1819 {
1820
1821 return (0);
1822 }