FreeBSD/Linux Kernel Cross Reference
sys/kern/sys_pipe.c
1 /*
2 * Copyright (c) 1996 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice immediately at the beginning of the file, without modification,
10 * this list of conditions, and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Absolutely no warranty of function or purpose is made by the author
15 * John S. Dyson.
16 * 4. Modifications may be freely made to this file if the above conditions
17 * are met.
18 *
19 * $FreeBSD: releng/5.1/sys/kern/sys_pipe.c 112981 2003-04-02 15:24:50Z hsu $
20 */
21
22 /*
23 * This file contains a high-performance replacement for the socket-based
24 * pipes scheme originally used in FreeBSD/4.4Lite. It does not support
25 * all features of sockets, but does do everything that pipes normally
26 * do.
27 */
28
29 /*
30  * This code has two modes of operation: a small write mode and a large
31  * write mode. The small write mode acts like conventional pipes with
32  * a kernel buffer. If the write is smaller than PIPE_MINDIRECT, the
33  * "normal" pipe buffering is done. If the write is between PIPE_MINDIRECT
34  * and PIPE_SIZE in size, it is fully mapped and wired into the kernel, and
35 * the receiving process can copy it directly from the pages in the sending
36 * process.
37 *
38 * If the sending process receives a signal, it is possible that it will
39 * go away, and certainly its address space can change, because control
40 * is returned back to the user-mode side. In that case, the pipe code
41 * arranges to copy the buffer supplied by the user process, to a pageable
42 * kernel buffer, and the receiving process will grab the data from the
43 * pageable kernel buffer. Since signals don't happen all that often,
44 * the copy operation is normally eliminated.
45 *
46 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
47 * happen for small transfers so that the system will not spend all of
48 * its time context switching. PIPE_SIZE is constrained by the
49 * amount of kernel virtual memory.
50 */
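
/*
 * An illustrative userland sketch, not kernel code: a single write(2)
 * of PIPE_MINDIRECT bytes or more (8192 in sys/pipe.h of this era;
 * treat the exact value as an assumption) is a candidate for the
 * direct, page-wired path described above, while smaller writes take
 * the buffered path. The kernel picks the path; userland code is
 * identical either way.
 */
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void
demo_write_modes(int wfd)
{
	char small[512];		/* buffered ("small write") path */
	char *large;

	large = malloc(64 * 1024);	/* direct-write candidate */
	if (large == NULL)
		return;
	memset(small, 'a', sizeof(small));
	memset(large, 'b', 64 * 1024);
	(void)write(wfd, small, sizeof(small));
	(void)write(wfd, large, 64 * 1024);
	free(large);
}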
51
52 #include "opt_mac.h"
53
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/fcntl.h>
57 #include <sys/file.h>
58 #include <sys/filedesc.h>
59 #include <sys/filio.h>
60 #include <sys/kernel.h>
61 #include <sys/lock.h>
62 #include <sys/mac.h>
63 #include <sys/mutex.h>
64 #include <sys/ttycom.h>
65 #include <sys/stat.h>
66 #include <sys/malloc.h>
67 #include <sys/poll.h>
68 #include <sys/selinfo.h>
69 #include <sys/signalvar.h>
70 #include <sys/sysproto.h>
71 #include <sys/pipe.h>
72 #include <sys/proc.h>
73 #include <sys/vnode.h>
74 #include <sys/uio.h>
75 #include <sys/event.h>
76
77 #include <vm/vm.h>
78 #include <vm/vm_param.h>
79 #include <vm/vm_object.h>
80 #include <vm/vm_kern.h>
81 #include <vm/vm_extern.h>
82 #include <vm/pmap.h>
83 #include <vm/vm_map.h>
84 #include <vm/vm_page.h>
85 #include <vm/uma.h>
86
87 /*
88 * Use this define if you want to disable *fancy* VM things. Expect an
89 * approx 30% decrease in transfer rate. This could be useful for
90 * NetBSD or OpenBSD.
91 */
92 /* #define PIPE_NODIRECT */
93
94 /*
95 * interfaces to the outside world
96 */
97 static fo_rdwr_t pipe_read;
98 static fo_rdwr_t pipe_write;
99 static fo_ioctl_t pipe_ioctl;
100 static fo_poll_t pipe_poll;
101 static fo_kqfilter_t pipe_kqfilter;
102 static fo_stat_t pipe_stat;
103 static fo_close_t pipe_close;
104
105 static struct fileops pipeops = {
106 pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
107 pipe_stat, pipe_close, DFLAG_PASSABLE
108 };
109
110 static void filt_pipedetach(struct knote *kn);
111 static int filt_piperead(struct knote *kn, long hint);
112 static int filt_pipewrite(struct knote *kn, long hint);
113
114 static struct filterops pipe_rfiltops =
115 { 1, NULL, filt_pipedetach, filt_piperead };
116 static struct filterops pipe_wfiltops =
117 { 1, NULL, filt_pipedetach, filt_pipewrite };
118
119 #define PIPE_GET_GIANT(pipe) \
120 do { \
121 KASSERT(((pipe)->pipe_state & PIPE_LOCKFL) != 0, \
122 ("%s:%d PIPE_GET_GIANT: line pipe not locked", \
123 __FILE__, __LINE__)); \
124 PIPE_UNLOCK(pipe); \
125 mtx_lock(&Giant); \
126 } while (0)
127
128 #define PIPE_DROP_GIANT(pipe) \
129 do { \
130 mtx_unlock(&Giant); \
131 PIPE_LOCK(pipe); \
132 } while (0)
133
134 /*
135  * Default pipe buffer size(s); this can be kind-of large now because pipe
136 * space is pageable. The pipe code will try to maintain locality of
137 * reference for performance reasons, so small amounts of outstanding I/O
138 * will not wipe the cache.
139 */
140 #define MINPIPESIZE (PIPE_SIZE/3)
141 #define MAXPIPESIZE (2*PIPE_SIZE/3)
142
143 /*
144 * Maximum amount of kva for pipes -- this is kind-of a soft limit, but
145 * is there so that on large systems, we don't exhaust it.
146 */
147 #define MAXPIPEKVA (8*1024*1024)
148
149 /*
150  * Limit for direct transfers; we cannot, of course, limit
151  * the amount of kva for pipes in general, though.
152 */
153 #define LIMITPIPEKVA (16*1024*1024)
154
155 /*
156 * Limit the number of "big" pipes
157 */
158 #define LIMITBIGPIPES 32
159 static int nbigpipe;
160
161 static int amountpipekva;
162
163 static void pipeinit(void *dummy __unused);
164 static void pipeclose(struct pipe *cpipe);
165 static void pipe_free_kmem(struct pipe *cpipe);
166 static int pipe_create(struct pipe **cpipep);
167 static __inline int pipelock(struct pipe *cpipe, int catch);
168 static __inline void pipeunlock(struct pipe *cpipe);
169 static __inline void pipeselwakeup(struct pipe *cpipe);
170 #ifndef PIPE_NODIRECT
171 static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
172 static void pipe_destroy_write_buffer(struct pipe *wpipe);
173 static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
174 static void pipe_clone_write_buffer(struct pipe *wpipe);
175 #endif
176 static int pipespace(struct pipe *cpipe, int size);
177
178 static uma_zone_t pipe_zone;
179
180 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);
181
182 static void
183 pipeinit(void *dummy __unused)
184 {
185 pipe_zone = uma_zcreate("PIPE", sizeof(struct pipe), NULL,
186 NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
187 }
188
189 /*
190 * The pipe system call for the DTYPE_PIPE type of pipes
191 */
192
193 /* ARGSUSED */
194 int
195 pipe(td, uap)
196 struct thread *td;
197 struct pipe_args /* {
198 int dummy;
199 } */ *uap;
200 {
201 struct filedesc *fdp = td->td_proc->p_fd;
202 struct file *rf, *wf;
203 struct pipe *rpipe, *wpipe;
204 struct mtx *pmtx;
205 int fd, error;
206
207 KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
208
209 pmtx = malloc(sizeof(*pmtx), M_TEMP, M_WAITOK | M_ZERO);
210
211 rpipe = wpipe = NULL;
212 if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
213 pipeclose(rpipe);
214 pipeclose(wpipe);
215 free(pmtx, M_TEMP);
216 return (ENFILE);
217 }
218
219 rpipe->pipe_state |= PIPE_DIRECTOK;
220 wpipe->pipe_state |= PIPE_DIRECTOK;
221
222 error = falloc(td, &rf, &fd);
223 if (error) {
224 pipeclose(rpipe);
225 pipeclose(wpipe);
226 free(pmtx, M_TEMP);
227 return (error);
228 }
229 fhold(rf);
230 td->td_retval[0] = fd;
231
232 /*
233 * Warning: once we've gotten past allocation of the fd for the
234 * read-side, we can only drop the read side via fdrop() in order
235 * to avoid races against processes which manage to dup() the read
236 * side while we are blocked trying to allocate the write side.
237 */
238 FILE_LOCK(rf);
239 rf->f_flag = FREAD | FWRITE;
240 rf->f_type = DTYPE_PIPE;
241 rf->f_data = rpipe;
242 rf->f_ops = &pipeops;
243 FILE_UNLOCK(rf);
244 error = falloc(td, &wf, &fd);
245 if (error) {
246 FILEDESC_LOCK(fdp);
247 if (fdp->fd_ofiles[td->td_retval[0]] == rf) {
248 fdp->fd_ofiles[td->td_retval[0]] = NULL;
249 FILEDESC_UNLOCK(fdp);
250 fdrop(rf, td);
251 } else
252 FILEDESC_UNLOCK(fdp);
253 fdrop(rf, td);
254 /* rpipe has been closed by fdrop(). */
255 pipeclose(wpipe);
256 free(pmtx, M_TEMP);
257 return (error);
258 }
259 FILE_LOCK(wf);
260 wf->f_flag = FREAD | FWRITE;
261 wf->f_type = DTYPE_PIPE;
262 wf->f_data = wpipe;
263 wf->f_ops = &pipeops;
264 FILE_UNLOCK(wf);
265 td->td_retval[1] = fd;
266 rpipe->pipe_peer = wpipe;
267 wpipe->pipe_peer = rpipe;
268 #ifdef MAC
269 /*
270 * struct pipe represents a pipe endpoint. The MAC label is shared
271 * between the connected endpoints. As a result mac_init_pipe() and
272 * mac_create_pipe() should only be called on one of the endpoints
273 * after they have been connected.
274 */
275 mac_init_pipe(rpipe);
276 mac_create_pipe(td->td_ucred, rpipe);
277 #endif
278 mtx_init(pmtx, "pipe mutex", NULL, MTX_DEF | MTX_RECURSE);
279 rpipe->pipe_mtxp = wpipe->pipe_mtxp = pmtx;
280 fdrop(rf, td);
281
282 return (0);
283 }
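
/*
 * Userland view of the syscall above (an illustrative sketch, not
 * part of this kernel file): the descriptors placed in td_retval[0]
 * and td_retval[1] surface as fildes[0] (read side) and fildes[1]
 * (write side) of pipe(2).
 */
#include <unistd.h>

static int
demo_pipe(void)
{
	int fildes[2];
	char buf[5];

	if (pipe(fildes) == -1)
		return (-1);
	(void)write(fildes[1], "ping", 5);	/* write side */
	(void)read(fildes[0], buf, 5);		/* read side */
	(void)close(fildes[0]);
	(void)close(fildes[1]);
	return (0);
}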
284
285 /*
286  * Allocate kva for the pipe circular buffer; the space is pageable.
287  * This routine will 'realloc' the size of a pipe safely: if it
288  * fails, it retains the old buffer and returns ENOMEM, leaving
289  * the pipe usable at its previous size.
290 */
291 static int
292 pipespace(cpipe, size)
293 struct pipe *cpipe;
294 int size;
295 {
296 struct vm_object *object;
297 caddr_t buffer;
298 int npages, error;
299
300 GIANT_REQUIRED;
301 KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
302 ("pipespace: pipe mutex locked"));
303
304 npages = round_page(size)/PAGE_SIZE;
305 /*
306 	 * Create an object; I don't like the idea of paging to/from
307 * kernel_object.
308 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
309 */
310 object = vm_object_allocate(OBJT_DEFAULT, npages);
311 buffer = (caddr_t) vm_map_min(kernel_map);
312
313 /*
314 * Insert the object into the kernel map, and allocate kva for it.
315 * The map entry is, by default, pageable.
316 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
317 */
318 error = vm_map_find(kernel_map, object, 0,
319 (vm_offset_t *) &buffer, size, 1,
320 VM_PROT_ALL, VM_PROT_ALL, 0);
321
322 if (error != KERN_SUCCESS) {
323 vm_object_deallocate(object);
324 return (ENOMEM);
325 }
326
327 /* free old resources if we're resizing */
328 pipe_free_kmem(cpipe);
329 cpipe->pipe_buffer.object = object;
330 cpipe->pipe_buffer.buffer = buffer;
331 cpipe->pipe_buffer.size = size;
332 cpipe->pipe_buffer.in = 0;
333 cpipe->pipe_buffer.out = 0;
334 cpipe->pipe_buffer.cnt = 0;
335 atomic_add_int(&amountpipekva, cpipe->pipe_buffer.size);
336 return (0);
337 }
338
339 /*
340 * initialize and allocate VM and memory for pipe
341 */
342 static int
343 pipe_create(cpipep)
344 struct pipe **cpipep;
345 {
346 struct pipe *cpipe;
347 int error;
348
349 *cpipep = uma_zalloc(pipe_zone, M_WAITOK);
350 if (*cpipep == NULL)
351 return (ENOMEM);
352
353 cpipe = *cpipep;
354
355 /* so pipespace()->pipe_free_kmem() doesn't follow junk pointer */
356 cpipe->pipe_buffer.object = NULL;
357 #ifndef PIPE_NODIRECT
358 cpipe->pipe_map.kva = 0;
359 #endif
360 /*
361 * protect so pipeclose() doesn't follow a junk pointer
362 * if pipespace() fails.
363 */
364 bzero(&cpipe->pipe_sel, sizeof(cpipe->pipe_sel));
365 cpipe->pipe_state = 0;
366 cpipe->pipe_peer = NULL;
367 cpipe->pipe_busy = 0;
368
369 #ifndef PIPE_NODIRECT
370 /*
371 * pipe data structure initializations to support direct pipe I/O
372 */
373 cpipe->pipe_map.cnt = 0;
374 cpipe->pipe_map.kva = 0;
375 cpipe->pipe_map.pos = 0;
376 cpipe->pipe_map.npages = 0;
377 /* cpipe->pipe_map.ms[] = invalid */
378 #endif
379
380 cpipe->pipe_mtxp = NULL; /* avoid pipespace assertion */
381 error = pipespace(cpipe, PIPE_SIZE);
382 if (error)
383 return (error);
384
385 vfs_timestamp(&cpipe->pipe_ctime);
386 cpipe->pipe_atime = cpipe->pipe_ctime;
387 cpipe->pipe_mtime = cpipe->pipe_ctime;
388
389 return (0);
390 }
391
392
393 /*
394 * lock a pipe for I/O, blocking other access
395 */
396 static __inline int
397 pipelock(cpipe, catch)
398 struct pipe *cpipe;
399 int catch;
400 {
401 int error;
402
403 PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
404 while (cpipe->pipe_state & PIPE_LOCKFL) {
405 cpipe->pipe_state |= PIPE_LWANT;
406 error = msleep(cpipe, PIPE_MTX(cpipe),
407 catch ? (PRIBIO | PCATCH) : PRIBIO,
408 "pipelk", 0);
409 if (error != 0)
410 return (error);
411 }
412 cpipe->pipe_state |= PIPE_LOCKFL;
413 return (0);
414 }
415
416 /*
417 * unlock a pipe I/O lock
418 */
419 static __inline void
420 pipeunlock(cpipe)
421 struct pipe *cpipe;
422 {
423
424 PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
425 cpipe->pipe_state &= ~PIPE_LOCKFL;
426 if (cpipe->pipe_state & PIPE_LWANT) {
427 cpipe->pipe_state &= ~PIPE_LWANT;
428 wakeup(cpipe);
429 }
430 }
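
/*
 * A sketch of the caller pattern the two helpers above expect, drawn
 * from the uses in pipe_read() and pipe_direct_write() below: the pipe
 * mutex guards the state bits, while PIPE_LOCKFL is a long-term I/O
 * lock that stays held across sleeps and copies.
 */
#if 0
	PIPE_LOCK(cpipe);
	if ((error = pipelock(cpipe, 1)) == 0) {
		/* ... manipulate the buffer, dropping the mutex around uiomove ... */
		pipeunlock(cpipe);
	}
	PIPE_UNLOCK(cpipe);
#endif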
431
432 static __inline void
433 pipeselwakeup(cpipe)
434 struct pipe *cpipe;
435 {
436
437 if (cpipe->pipe_state & PIPE_SEL) {
438 cpipe->pipe_state &= ~PIPE_SEL;
439 selwakeup(&cpipe->pipe_sel);
440 }
441 if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
442 pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
443 KNOTE(&cpipe->pipe_sel.si_note, 0);
444 }
445
446 /* ARGSUSED */
447 static int
448 pipe_read(fp, uio, active_cred, flags, td)
449 struct file *fp;
450 struct uio *uio;
451 struct ucred *active_cred;
452 struct thread *td;
453 int flags;
454 {
455 struct pipe *rpipe = fp->f_data;
456 int error;
457 int nread = 0;
458 u_int size;
459
460 PIPE_LOCK(rpipe);
461 ++rpipe->pipe_busy;
462 error = pipelock(rpipe, 1);
463 if (error)
464 goto unlocked_error;
465
466 #ifdef MAC
467 error = mac_check_pipe_read(active_cred, rpipe);
468 if (error)
469 goto locked_error;
470 #endif
471
472 while (uio->uio_resid) {
473 /*
474 * normal pipe buffer receive
475 */
476 if (rpipe->pipe_buffer.cnt > 0) {
477 size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
478 if (size > rpipe->pipe_buffer.cnt)
479 size = rpipe->pipe_buffer.cnt;
480 if (size > (u_int) uio->uio_resid)
481 size = (u_int) uio->uio_resid;
482
483 PIPE_UNLOCK(rpipe);
484 error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
485 size, uio);
486 PIPE_LOCK(rpipe);
487 if (error)
488 break;
489
490 rpipe->pipe_buffer.out += size;
491 if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
492 rpipe->pipe_buffer.out = 0;
493
494 rpipe->pipe_buffer.cnt -= size;
495
496 /*
497 * If there is no more to read in the pipe, reset
498 * its pointers to the beginning. This improves
499 * cache hit stats.
500 */
501 if (rpipe->pipe_buffer.cnt == 0) {
502 rpipe->pipe_buffer.in = 0;
503 rpipe->pipe_buffer.out = 0;
504 }
505 nread += size;
506 #ifndef PIPE_NODIRECT
507 /*
508 * Direct copy, bypassing a kernel buffer.
509 */
510 } else if ((size = rpipe->pipe_map.cnt) &&
511 (rpipe->pipe_state & PIPE_DIRECTW)) {
512 caddr_t va;
513 if (size > (u_int) uio->uio_resid)
514 size = (u_int) uio->uio_resid;
515
516 va = (caddr_t) rpipe->pipe_map.kva +
517 rpipe->pipe_map.pos;
518 PIPE_UNLOCK(rpipe);
519 error = uiomove(va, size, uio);
520 PIPE_LOCK(rpipe);
521 if (error)
522 break;
523 nread += size;
524 rpipe->pipe_map.pos += size;
525 rpipe->pipe_map.cnt -= size;
526 if (rpipe->pipe_map.cnt == 0) {
527 rpipe->pipe_state &= ~PIPE_DIRECTW;
528 wakeup(rpipe);
529 }
530 #endif
531 } else {
532 /*
533 * detect EOF condition
534 * read returns 0 on EOF, no need to set error
535 */
536 if (rpipe->pipe_state & PIPE_EOF)
537 break;
538
539 /*
540 * If the "write-side" has been blocked, wake it up now.
541 */
542 if (rpipe->pipe_state & PIPE_WANTW) {
543 rpipe->pipe_state &= ~PIPE_WANTW;
544 wakeup(rpipe);
545 }
546
547 /*
548 * Break if some data was read.
549 */
550 if (nread > 0)
551 break;
552
553 /*
554 * Unlock the pipe buffer for our remaining processing. We
555 * will either break out with an error or we will sleep and
556 * relock to loop.
557 */
558 pipeunlock(rpipe);
559
560 /*
561 * Handle non-blocking mode operation or
562 * wait for more data.
563 */
564 if (fp->f_flag & FNONBLOCK) {
565 error = EAGAIN;
566 } else {
567 rpipe->pipe_state |= PIPE_WANTR;
568 if ((error = msleep(rpipe, PIPE_MTX(rpipe),
569 PRIBIO | PCATCH,
570 "piperd", 0)) == 0)
571 error = pipelock(rpipe, 1);
572 }
573 if (error)
574 goto unlocked_error;
575 }
576 }
577 #ifdef MAC
578 locked_error:
579 #endif
580 pipeunlock(rpipe);
581
582 /* XXX: should probably do this before getting any locks. */
583 if (error == 0)
584 vfs_timestamp(&rpipe->pipe_atime);
585 unlocked_error:
586 --rpipe->pipe_busy;
587
588 /*
589 * PIPE_WANT processing only makes sense if pipe_busy is 0.
590 */
591 if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
592 rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
593 wakeup(rpipe);
594 } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
595 /*
596 * Handle write blocking hysteresis.
597 */
598 if (rpipe->pipe_state & PIPE_WANTW) {
599 rpipe->pipe_state &= ~PIPE_WANTW;
600 wakeup(rpipe);
601 }
602 }
603
604 if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
605 pipeselwakeup(rpipe);
606
607 PIPE_UNLOCK(rpipe);
608 return (error);
609 }
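
/*
 * The in/out/cnt arithmetic above is a plain circular buffer. A
 * self-contained userland sketch of the same consume step follows
 * (all names invented for illustration); it copies one contiguous
 * run per call, so callers loop just as pipe_read() does.
 */
#include <stddef.h>
#include <string.h>

struct ring {
	char	buf[16384];	/* analogous to pipe_buffer.buffer */
	size_t	in, out, cnt;	/* write index, read index, bytes used */
};

static size_t
ring_read(struct ring *r, char *dst, size_t want)
{
	size_t size;

	/* Largest contiguous run, clipped as in pipe_read(). */
	size = sizeof(r->buf) - r->out;
	if (size > r->cnt)
		size = r->cnt;
	if (size > want)
		size = want;
	memcpy(dst, &r->buf[r->out], size);
	r->out += size;
	if (r->out >= sizeof(r->buf))
		r->out = 0;
	r->cnt -= size;
	if (r->cnt == 0)	/* reset indices for cache locality, as above */
		r->in = r->out = 0;
	return (size);
}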
610
611 #ifndef PIPE_NODIRECT
612 /*
613  * Map the sending process's buffer into kernel space and wire it.
614 * This is similar to a physical write operation.
615 */
616 static int
617 pipe_build_write_buffer(wpipe, uio)
618 struct pipe *wpipe;
619 struct uio *uio;
620 {
621 u_int size;
622 int i;
623 vm_offset_t addr, endaddr;
624 vm_paddr_t paddr;
625
626 GIANT_REQUIRED;
627 PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
628
629 size = (u_int) uio->uio_iov->iov_len;
630 if (size > wpipe->pipe_buffer.size)
631 size = wpipe->pipe_buffer.size;
632
633 endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
634 addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
635 for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
636 vm_page_t m;
637
638 /*
639 * vm_fault_quick() can sleep. Consequently,
640 * vm_page_lock_queue() and vm_page_unlock_queue()
641 * should not be performed outside of this loop.
642 */
643 if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 ||
644 (paddr = pmap_extract(vmspace_pmap(curproc->p_vmspace),
645 addr)) == 0) {
646 int j;
647
648 vm_page_lock_queues();
649 for (j = 0; j < i; j++)
650 vm_page_unwire(wpipe->pipe_map.ms[j], 1);
651 vm_page_unlock_queues();
652 return (EFAULT);
653 }
654
655 m = PHYS_TO_VM_PAGE(paddr);
656 vm_page_lock_queues();
657 vm_page_wire(m);
658 vm_page_unlock_queues();
659 wpipe->pipe_map.ms[i] = m;
660 }
661
662 /*
663 * set up the control block
664 */
665 wpipe->pipe_map.npages = i;
666 wpipe->pipe_map.pos =
667 ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
668 wpipe->pipe_map.cnt = size;
669
670 /*
671 * and map the buffer
672 */
673 if (wpipe->pipe_map.kva == 0) {
674 /*
675 * We need to allocate space for an extra page because the
676 		 * address range is usually not page-aligned and spans an extra page.
677 */
678 wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
679 wpipe->pipe_buffer.size + PAGE_SIZE);
680 atomic_add_int(&amountpipekva,
681 wpipe->pipe_buffer.size + PAGE_SIZE);
682 }
683 pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
684 wpipe->pipe_map.npages);
685
686 /*
687 * and update the uio data
688 */
689
690 uio->uio_iov->iov_len -= size;
691 uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
692 if (uio->uio_iov->iov_len == 0)
693 uio->uio_iov++;
694 uio->uio_resid -= size;
695 uio->uio_offset += size;
696 return (0);
697 }
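
/*
 * The trunc_page()/round_page() loop above wires every page touched
 * by [iov_base, iov_base + size), which is also why the kva mapping
 * reserves one extra page: an unaligned range spills into one more
 * page than the size alone suggests. A userland sketch of the
 * arithmetic (a PAGE_SIZE of 4096 is assumed for illustration):
 */
#include <stdint.h>

#define EX_PAGE_SIZE		4096UL
#define ex_trunc_page(x)	((x) & ~(EX_PAGE_SIZE - 1))
#define ex_round_page(x)	(((x) + EX_PAGE_SIZE - 1) & ~(EX_PAGE_SIZE - 1))

static unsigned long
pages_spanned(uintptr_t base, unsigned long size)
{
	/* e.g. base 0x1ff0, size 0x20 spans two pages, not one */
	return ((ex_round_page(base + size) - ex_trunc_page(base)) /
	    EX_PAGE_SIZE);
}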
698
699 /*
700 * unmap and unwire the process buffer
701 */
702 static void
703 pipe_destroy_write_buffer(wpipe)
704 struct pipe *wpipe;
705 {
706 int i;
707
708 GIANT_REQUIRED;
709 PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
710
711 if (wpipe->pipe_map.kva) {
712 pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);
713
714 if (amountpipekva > MAXPIPEKVA) {
715 vm_offset_t kva = wpipe->pipe_map.kva;
716 wpipe->pipe_map.kva = 0;
717 kmem_free(kernel_map, kva,
718 wpipe->pipe_buffer.size + PAGE_SIZE);
719 atomic_subtract_int(&amountpipekva,
720 wpipe->pipe_buffer.size + PAGE_SIZE);
721 }
722 }
723 vm_page_lock_queues();
724 for (i = 0; i < wpipe->pipe_map.npages; i++)
725 vm_page_unwire(wpipe->pipe_map.ms[i], 1);
726 vm_page_unlock_queues();
727 wpipe->pipe_map.npages = 0;
728 }
729
730 /*
731 * In the case of a signal, the writing process might go away. This
732 * code copies the data into the circular buffer so that the source
733 * pages can be freed without loss of data.
734 */
735 static void
736 pipe_clone_write_buffer(wpipe)
737 struct pipe *wpipe;
738 {
739 int size;
740 int pos;
741
742 PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
743 size = wpipe->pipe_map.cnt;
744 pos = wpipe->pipe_map.pos;
745
746 wpipe->pipe_buffer.in = size;
747 wpipe->pipe_buffer.out = 0;
748 wpipe->pipe_buffer.cnt = size;
749 wpipe->pipe_state &= ~PIPE_DIRECTW;
750
751 PIPE_GET_GIANT(wpipe);
752 bcopy((caddr_t) wpipe->pipe_map.kva + pos,
753 wpipe->pipe_buffer.buffer, size);
754 pipe_destroy_write_buffer(wpipe);
755 PIPE_DROP_GIANT(wpipe);
756 }
757
758 /*
759 * This implements the pipe buffer write mechanism. Note that only
760 * a direct write OR a normal pipe write can be pending at any given time.
761 * If there are any characters in the pipe buffer, the direct write will
762 * be deferred until the receiving process grabs all of the bytes from
763  * the pipe buffer. Then the direct mapping write is set up.
764 */
765 static int
766 pipe_direct_write(wpipe, uio)
767 struct pipe *wpipe;
768 struct uio *uio;
769 {
770 int error;
771
772 retry:
773 PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
774 while (wpipe->pipe_state & PIPE_DIRECTW) {
775 if (wpipe->pipe_state & PIPE_WANTR) {
776 wpipe->pipe_state &= ~PIPE_WANTR;
777 wakeup(wpipe);
778 }
779 wpipe->pipe_state |= PIPE_WANTW;
780 error = msleep(wpipe, PIPE_MTX(wpipe),
781 PRIBIO | PCATCH, "pipdww", 0);
782 if (error)
783 goto error1;
784 if (wpipe->pipe_state & PIPE_EOF) {
785 error = EPIPE;
786 goto error1;
787 }
788 }
789 wpipe->pipe_map.cnt = 0; /* transfer not ready yet */
790 if (wpipe->pipe_buffer.cnt > 0) {
791 if (wpipe->pipe_state & PIPE_WANTR) {
792 wpipe->pipe_state &= ~PIPE_WANTR;
793 wakeup(wpipe);
794 }
795
796 wpipe->pipe_state |= PIPE_WANTW;
797 error = msleep(wpipe, PIPE_MTX(wpipe),
798 PRIBIO | PCATCH, "pipdwc", 0);
799 if (error)
800 goto error1;
801 if (wpipe->pipe_state & PIPE_EOF) {
802 error = EPIPE;
803 goto error1;
804 }
805 goto retry;
806 }
807
808 wpipe->pipe_state |= PIPE_DIRECTW;
809
810 pipelock(wpipe, 0);
811 PIPE_GET_GIANT(wpipe);
812 error = pipe_build_write_buffer(wpipe, uio);
813 PIPE_DROP_GIANT(wpipe);
814 pipeunlock(wpipe);
815 if (error) {
816 wpipe->pipe_state &= ~PIPE_DIRECTW;
817 goto error1;
818 }
819
820 error = 0;
821 while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
822 if (wpipe->pipe_state & PIPE_EOF) {
823 pipelock(wpipe, 0);
824 PIPE_GET_GIANT(wpipe);
825 pipe_destroy_write_buffer(wpipe);
826 PIPE_DROP_GIANT(wpipe);
827 pipeselwakeup(wpipe);
828 pipeunlock(wpipe);
829 error = EPIPE;
830 goto error1;
831 }
832 if (wpipe->pipe_state & PIPE_WANTR) {
833 wpipe->pipe_state &= ~PIPE_WANTR;
834 wakeup(wpipe);
835 }
836 pipeselwakeup(wpipe);
837 error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
838 "pipdwt", 0);
839 }
840
841 	pipelock(wpipe, 0);
842 if (wpipe->pipe_state & PIPE_DIRECTW) {
843 /*
844 * this bit of trickery substitutes a kernel buffer for
845 * the process that might be going away.
846 */
847 pipe_clone_write_buffer(wpipe);
848 } else {
849 PIPE_GET_GIANT(wpipe);
850 pipe_destroy_write_buffer(wpipe);
851 PIPE_DROP_GIANT(wpipe);
852 }
853 pipeunlock(wpipe);
854 return (error);
855
856 error1:
857 wakeup(wpipe);
858 return (error);
859 }
860 #endif
861
862 static int
863 pipe_write(fp, uio, active_cred, flags, td)
864 struct file *fp;
865 struct uio *uio;
866 struct ucred *active_cred;
867 struct thread *td;
868 int flags;
869 {
870 int error = 0;
871 int orig_resid;
872 struct pipe *wpipe, *rpipe;
873
874 rpipe = fp->f_data;
875 wpipe = rpipe->pipe_peer;
876
877 PIPE_LOCK(rpipe);
878 /*
879 * detect loss of pipe read side, issue SIGPIPE if lost.
880 */
881 if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
882 PIPE_UNLOCK(rpipe);
883 return (EPIPE);
884 }
885 #ifdef MAC
886 error = mac_check_pipe_write(active_cred, wpipe);
887 if (error) {
888 PIPE_UNLOCK(rpipe);
889 return (error);
890 }
891 #endif
892 ++wpipe->pipe_busy;
893
894 /*
895 * If it is advantageous to resize the pipe buffer, do
896 * so.
897 */
898 if ((uio->uio_resid > PIPE_SIZE) &&
899 (nbigpipe < LIMITBIGPIPES) &&
900 (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
901 (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
902 (wpipe->pipe_buffer.cnt == 0)) {
903
904 if ((error = pipelock(wpipe, 1)) == 0) {
905 PIPE_GET_GIANT(wpipe);
906 if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
907 nbigpipe++;
908 PIPE_DROP_GIANT(wpipe);
909 pipeunlock(wpipe);
910 }
911 }
912
913 /*
914 	 * If an early error occurred, unbusy and return, waking up any pending
915 * readers.
916 */
917 if (error) {
918 --wpipe->pipe_busy;
919 if ((wpipe->pipe_busy == 0) &&
920 (wpipe->pipe_state & PIPE_WANT)) {
921 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
922 wakeup(wpipe);
923 }
924 PIPE_UNLOCK(rpipe);
925 		return (error);
926 }
927
928 orig_resid = uio->uio_resid;
929
930 while (uio->uio_resid) {
931 int space;
932
933 #ifndef PIPE_NODIRECT
934 /*
935 * If the transfer is large, we can gain performance if
936 * we do process-to-process copies directly.
937 * If the write is non-blocking, we don't use the
938 * direct write mechanism.
939 *
940 * The direct write mechanism will detect the reader going
941 * away on us.
942 */
943 		if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
944 		    (fp->f_flag & FNONBLOCK) == 0 &&
945 		    (wpipe->pipe_map.kva ||
946 		     (amountpipekva < LIMITPIPEKVA))) {
947 error = pipe_direct_write(wpipe, uio);
948 if (error)
949 break;
950 continue;
951 }
952 #endif
953
954 /*
955 		 * Pipe buffered writes cannot coincide with
956 * direct writes. We wait until the currently executing
957 * direct write is completed before we start filling the
958 * pipe buffer. We break out if a signal occurs or the
959 * reader goes away.
960 */
961 retrywrite:
962 while (wpipe->pipe_state & PIPE_DIRECTW) {
963 if (wpipe->pipe_state & PIPE_WANTR) {
964 wpipe->pipe_state &= ~PIPE_WANTR;
965 wakeup(wpipe);
966 }
967 error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
968 "pipbww", 0);
969 if (wpipe->pipe_state & PIPE_EOF)
970 break;
971 if (error)
972 break;
973 }
974 if (wpipe->pipe_state & PIPE_EOF) {
975 error = EPIPE;
976 break;
977 }
978
979 space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
980
981 /* Writes of size <= PIPE_BUF must be atomic. */
982 if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
983 space = 0;
984
985 if (space > 0 && (wpipe->pipe_buffer.cnt < PIPE_SIZE)) {
986 			if ((error = pipelock(wpipe, 1)) == 0) {
987 int size; /* Transfer size */
988 int segsize; /* first segment to transfer */
989
990 /*
991 * It is possible for a direct write to
992 * slip in on us... handle it here...
993 */
994 if (wpipe->pipe_state & PIPE_DIRECTW) {
995 pipeunlock(wpipe);
996 goto retrywrite;
997 }
998 /*
999 * If a process blocked in uiomove, our
1000 * value for space might be bad.
1001 *
1002 * XXX will we be ok if the reader has gone
1003 * away here?
1004 */
1005 if (space > wpipe->pipe_buffer.size -
1006 wpipe->pipe_buffer.cnt) {
1007 pipeunlock(wpipe);
1008 goto retrywrite;
1009 }
1010
1011 /*
1012 * Transfer size is minimum of uio transfer
1013 * and free space in pipe buffer.
1014 */
1015 if (space > uio->uio_resid)
1016 size = uio->uio_resid;
1017 else
1018 size = space;
1019 /*
1020 * First segment to transfer is minimum of
1021 * transfer size and contiguous space in
1022 * pipe buffer. If first segment to transfer
1023 * is less than the transfer size, we've got
1024 * a wraparound in the buffer.
1025 */
1026 segsize = wpipe->pipe_buffer.size -
1027 wpipe->pipe_buffer.in;
1028 if (segsize > size)
1029 segsize = size;
1030
1031 /* Transfer first segment */
1032
1033 PIPE_UNLOCK(rpipe);
1034 error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
1035 segsize, uio);
1036 PIPE_LOCK(rpipe);
1037
1038 if (error == 0 && segsize < size) {
1039 /*
1040 * Transfer remaining part now, to
1041 * support atomic writes. Wraparound
1042 * happened.
1043 */
1044 if (wpipe->pipe_buffer.in + segsize !=
1045 wpipe->pipe_buffer.size)
1046 panic("Expected pipe buffer wraparound disappeared");
1047
1048 PIPE_UNLOCK(rpipe);
1049 error = uiomove(&wpipe->pipe_buffer.buffer[0],
1050 size - segsize, uio);
1051 PIPE_LOCK(rpipe);
1052 }
1053 if (error == 0) {
1054 wpipe->pipe_buffer.in += size;
1055 if (wpipe->pipe_buffer.in >=
1056 wpipe->pipe_buffer.size) {
1057 if (wpipe->pipe_buffer.in != size - segsize + wpipe->pipe_buffer.size)
1058 panic("Expected wraparound bad");
1059 wpipe->pipe_buffer.in = size - segsize;
1060 }
1061
1062 wpipe->pipe_buffer.cnt += size;
1063 if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
1064 panic("Pipe buffer overflow");
1065
1066 }
1067 pipeunlock(wpipe);
1068 }
1069 if (error)
1070 break;
1071
1072 } else {
1073 /*
1074 * If the "read-side" has been blocked, wake it up now.
1075 */
1076 if (wpipe->pipe_state & PIPE_WANTR) {
1077 wpipe->pipe_state &= ~PIPE_WANTR;
1078 wakeup(wpipe);
1079 }
1080
1081 /*
1082 * don't block on non-blocking I/O
1083 */
1084 if (fp->f_flag & FNONBLOCK) {
1085 error = EAGAIN;
1086 break;
1087 }
1088
1089 /*
1090 * We have no more space and have something to offer,
1091 * wake up select/poll.
1092 */
1093 pipeselwakeup(wpipe);
1094
1095 wpipe->pipe_state |= PIPE_WANTW;
1096 error = msleep(wpipe, PIPE_MTX(rpipe),
1097 PRIBIO | PCATCH, "pipewr", 0);
1098 if (error != 0)
1099 break;
1100 /*
1101 * If read side wants to go away, we just issue a signal
1102 * to ourselves.
1103 */
1104 if (wpipe->pipe_state & PIPE_EOF) {
1105 error = EPIPE;
1106 break;
1107 }
1108 }
1109 }
1110
1111 --wpipe->pipe_busy;
1112
1113 if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
1114 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
1115 wakeup(wpipe);
1116 } else if (wpipe->pipe_buffer.cnt > 0) {
1117 /*
1118 * If we have put any characters in the buffer, we wake up
1119 * the reader.
1120 */
1121 if (wpipe->pipe_state & PIPE_WANTR) {
1122 wpipe->pipe_state &= ~PIPE_WANTR;
1123 wakeup(wpipe);
1124 }
1125 }
1126
1127 /*
1128 * Don't return EPIPE if I/O was successful
1129 */
1130 if ((wpipe->pipe_buffer.cnt == 0) &&
1131 (uio->uio_resid == 0) &&
1132 (error == EPIPE)) {
1133 error = 0;
1134 }
1135
1136 if (error == 0)
1137 vfs_timestamp(&wpipe->pipe_mtime);
1138
1139 /*
1140 * We have something to offer,
1141 * wake up select/poll.
1142 */
1143 if (wpipe->pipe_buffer.cnt)
1144 pipeselwakeup(wpipe);
1145
1146 PIPE_UNLOCK(rpipe);
1147 return (error);
1148 }
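
/*
 * Userland consequence of the PIPE_BUF check above (an illustrative
 * sketch, not kernel code): writes of at most PIPE_BUF bytes (POSIX
 * guarantees at least 512) are never interleaved with other writers,
 * so fixed-size records from concurrent processes arrive intact.
 */
#include <unistd.h>

struct record {
	int	seq;
	char	payload[60];
};

static void
log_record(int wfd, const struct record *r)
{
	/* sizeof(*r) is 64 <= PIPE_BUF, so this write is atomic. */
	(void)write(wfd, r, sizeof(*r));
}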
1149
1150 /*
1151 * we implement a very minimal set of ioctls for compatibility with sockets.
1152 */
1153 static int
1154 pipe_ioctl(fp, cmd, data, active_cred, td)
1155 struct file *fp;
1156 u_long cmd;
1157 void *data;
1158 struct ucred *active_cred;
1159 struct thread *td;
1160 {
1161 struct pipe *mpipe = fp->f_data;
1162 #ifdef MAC
1163 int error;
1164 #endif
1165
1166 PIPE_LOCK(mpipe);
1167
1168 #ifdef MAC
1169 error = mac_check_pipe_ioctl(active_cred, mpipe, cmd, data);
1170 	if (error) {
		PIPE_UNLOCK(mpipe);
		return (error);
	}
1172 #endif
1173
1174 switch (cmd) {
1175
1176 case FIONBIO:
1177 PIPE_UNLOCK(mpipe);
1178 return (0);
1179
1180 case FIOASYNC:
1181 if (*(int *)data) {
1182 mpipe->pipe_state |= PIPE_ASYNC;
1183 } else {
1184 mpipe->pipe_state &= ~PIPE_ASYNC;
1185 }
1186 PIPE_UNLOCK(mpipe);
1187 return (0);
1188
1189 case FIONREAD:
1190 if (mpipe->pipe_state & PIPE_DIRECTW)
1191 *(int *)data = mpipe->pipe_map.cnt;
1192 else
1193 *(int *)data = mpipe->pipe_buffer.cnt;
1194 PIPE_UNLOCK(mpipe);
1195 return (0);
1196
1197 case FIOSETOWN:
1198 PIPE_UNLOCK(mpipe);
1199 return (fsetown(*(int *)data, &mpipe->pipe_sigio));
1200
1201 case FIOGETOWN:
1202 PIPE_UNLOCK(mpipe);
1203 *(int *)data = fgetown(&mpipe->pipe_sigio);
1204 return (0);
1205
1206 /* This is deprecated, FIOSETOWN should be used instead. */
1207 case TIOCSPGRP:
1208 PIPE_UNLOCK(mpipe);
1209 return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));
1210
1211 /* This is deprecated, FIOGETOWN should be used instead. */
1212 case TIOCGPGRP:
1213 PIPE_UNLOCK(mpipe);
1214 *(int *)data = -fgetown(&mpipe->pipe_sigio);
1215 return (0);
1216
1217 }
1218 PIPE_UNLOCK(mpipe);
1219 return (ENOTTY);
1220 }
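
/*
 * Userland sketch of the FIONREAD case above (illustrative, not
 * kernel code): ask how many bytes are ready on the read side
 * without consuming them.
 */
#include <sys/ioctl.h>
#include <sys/filio.h>

static int
bytes_pending(int rfd)
{
	int n;

	if (ioctl(rfd, FIONREAD, &n) == -1)
		return (-1);
	return (n);	/* pipe_buffer.cnt or pipe_map.cnt above */
}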
1221
1222 static int
1223 pipe_poll(fp, events, active_cred, td)
1224 struct file *fp;
1225 int events;
1226 struct ucred *active_cred;
1227 struct thread *td;
1228 {
1229 struct pipe *rpipe = fp->f_data;
1230 struct pipe *wpipe;
1231 int revents = 0;
1232 #ifdef MAC
1233 int error;
1234 #endif
1235
1236 wpipe = rpipe->pipe_peer;
1237 PIPE_LOCK(rpipe);
1238 #ifdef MAC
1239 error = mac_check_pipe_poll(active_cred, rpipe);
1240 if (error)
1241 goto locked_error;
1242 #endif
1243 if (events & (POLLIN | POLLRDNORM))
1244 if ((rpipe->pipe_state & PIPE_DIRECTW) ||
1245 (rpipe->pipe_buffer.cnt > 0) ||
1246 (rpipe->pipe_state & PIPE_EOF))
1247 revents |= events & (POLLIN | POLLRDNORM);
1248
1249 if (events & (POLLOUT | POLLWRNORM))
1250 if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) ||
1251 (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
1252 (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
1253 revents |= events & (POLLOUT | POLLWRNORM);
1254
1255 if ((rpipe->pipe_state & PIPE_EOF) ||
1256 (wpipe == NULL) ||
1257 (wpipe->pipe_state & PIPE_EOF))
1258 revents |= POLLHUP;
1259
1260 if (revents == 0) {
1261 if (events & (POLLIN | POLLRDNORM)) {
1262 selrecord(td, &rpipe->pipe_sel);
1263 rpipe->pipe_state |= PIPE_SEL;
1264 }
1265
1266 if (events & (POLLOUT | POLLWRNORM)) {
1267 selrecord(td, &wpipe->pipe_sel);
1268 wpipe->pipe_state |= PIPE_SEL;
1269 }
1270 }
1271 #ifdef MAC
1272 locked_error:
1273 #endif
1274 PIPE_UNLOCK(rpipe);
1275
1276 return (revents);
1277 }
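
/*
 * Userland sketch matching the logic above (illustrative, not kernel
 * code): POLLIN fires once data is buffered or on EOF, POLLOUT once
 * at least PIPE_BUF bytes of space are free, and POLLHUP when the
 * peer is gone.
 */
#include <poll.h>

static int
wait_readable(int rfd, int timeout_ms)
{
	struct pollfd pfd;

	pfd.fd = rfd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, timeout_ms) <= 0)
		return (0);	/* timeout or error */
	return ((pfd.revents & (POLLIN | POLLHUP)) != 0);
}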
1278
1279 /*
1280 * We shouldn't need locks here as we're doing a read and this should
1281 * be a natural race.
1282 */
1283 static int
1284 pipe_stat(fp, ub, active_cred, td)
1285 struct file *fp;
1286 struct stat *ub;
1287 struct ucred *active_cred;
1288 struct thread *td;
1289 {
1290 struct pipe *pipe = fp->f_data;
1291 #ifdef MAC
1292 int error;
1293
1294 PIPE_LOCK(pipe);
1295 error = mac_check_pipe_stat(active_cred, pipe);
1296 PIPE_UNLOCK(pipe);
1297 if (error)
1298 return (error);
1299 #endif
1300 bzero(ub, sizeof(*ub));
1301 ub->st_mode = S_IFIFO;
1302 ub->st_blksize = pipe->pipe_buffer.size;
1303 ub->st_size = pipe->pipe_buffer.cnt;
1304 ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
1305 ub->st_atimespec = pipe->pipe_atime;
1306 ub->st_mtimespec = pipe->pipe_mtime;
1307 ub->st_ctimespec = pipe->pipe_ctime;
1308 ub->st_uid = fp->f_cred->cr_uid;
1309 ub->st_gid = fp->f_cred->cr_gid;
1310 /*
1311 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
1312 * XXX (st_dev, st_ino) should be unique.
1313 */
1314 return (0);
1315 }
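
/*
 * Userland sketch of the translation above (illustrative, not kernel
 * code): fstat(2) on a pipe reports S_IFIFO, the buffer size as
 * st_blksize, and the bytes currently queued as st_size.
 */
#include <sys/stat.h>

static long
queued_bytes(int fd)
{
	struct stat sb;

	if (fstat(fd, &sb) == -1 || !S_ISFIFO(sb.st_mode))
		return (-1);
	return ((long)sb.st_size);	/* pipe_buffer.cnt above */
}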
1316
1317 /* ARGSUSED */
1318 static int
1319 pipe_close(fp, td)
1320 struct file *fp;
1321 struct thread *td;
1322 {
1323 struct pipe *cpipe = fp->f_data;
1324
1325 fp->f_ops = &badfileops;
1326 fp->f_data = NULL;
1327 funsetown(&cpipe->pipe_sigio);
1328 pipeclose(cpipe);
1329 return (0);
1330 }
1331
1332 static void
1333 pipe_free_kmem(cpipe)
1334 struct pipe *cpipe;
1335 {
1336
1337 GIANT_REQUIRED;
1338 KASSERT(cpipe->pipe_mtxp == NULL || !mtx_owned(PIPE_MTX(cpipe)),
1339 ("pipespace: pipe mutex locked"));
1340
1341 if (cpipe->pipe_buffer.buffer != NULL) {
1342 if (cpipe->pipe_buffer.size > PIPE_SIZE)
1343 --nbigpipe;
1344 atomic_subtract_int(&amountpipekva, cpipe->pipe_buffer.size);
1345 kmem_free(kernel_map,
1346 (vm_offset_t)cpipe->pipe_buffer.buffer,
1347 cpipe->pipe_buffer.size);
1348 cpipe->pipe_buffer.buffer = NULL;
1349 }
1350 #ifndef PIPE_NODIRECT
1351 if (cpipe->pipe_map.kva != 0) {
1352 atomic_subtract_int(&amountpipekva,
1353 cpipe->pipe_buffer.size + PAGE_SIZE);
1354 kmem_free(kernel_map,
1355 cpipe->pipe_map.kva,
1356 cpipe->pipe_buffer.size + PAGE_SIZE);
1357 cpipe->pipe_map.cnt = 0;
1358 cpipe->pipe_map.kva = 0;
1359 cpipe->pipe_map.pos = 0;
1360 cpipe->pipe_map.npages = 0;
1361 }
1362 #endif
1363 }
1364
1365 /*
1366 * shutdown the pipe
1367 */
1368 static void
1369 pipeclose(cpipe)
1370 struct pipe *cpipe;
1371 {
1372 struct pipe *ppipe;
1373 int hadpeer;
1374
1375 if (cpipe == NULL)
1376 return;
1377
1378 hadpeer = 0;
1379
1380 /* partially created pipes won't have a valid mutex. */
1381 if (PIPE_MTX(cpipe) != NULL)
1382 PIPE_LOCK(cpipe);
1383
1384 pipeselwakeup(cpipe);
1385
1386 /*
1387 * If the other side is blocked, wake it up saying that
1388 * we want to close it down.
1389 */
1390 while (cpipe->pipe_busy) {
1391 wakeup(cpipe);
1392 cpipe->pipe_state |= PIPE_WANT | PIPE_EOF;
1393 msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
1394 }
1395
1396 #ifdef MAC
1397 if (cpipe->pipe_label != NULL && cpipe->pipe_peer == NULL)
1398 mac_destroy_pipe(cpipe);
1399 #endif
1400
1401 /*
1402 * Disconnect from peer
1403 */
1404 if ((ppipe = cpipe->pipe_peer) != NULL) {
1405 hadpeer++;
1406 pipeselwakeup(ppipe);
1407
1408 ppipe->pipe_state |= PIPE_EOF;
1409 wakeup(ppipe);
1410 KNOTE(&ppipe->pipe_sel.si_note, 0);
1411 ppipe->pipe_peer = NULL;
1412 }
1413 /*
1414 * free resources
1415 */
1416 if (PIPE_MTX(cpipe) != NULL) {
1417 PIPE_UNLOCK(cpipe);
1418 if (!hadpeer) {
1419 mtx_destroy(PIPE_MTX(cpipe));
1420 free(PIPE_MTX(cpipe), M_TEMP);
1421 }
1422 }
1423 mtx_lock(&Giant);
1424 pipe_free_kmem(cpipe);
1425 uma_zfree(pipe_zone, cpipe);
1426 mtx_unlock(&Giant);
1427 }
1428
1429 /*ARGSUSED*/
1430 static int
1431 pipe_kqfilter(struct file *fp, struct knote *kn)
1432 {
1433 struct pipe *cpipe;
1434
1435 cpipe = kn->kn_fp->f_data;
1436 switch (kn->kn_filter) {
1437 case EVFILT_READ:
1438 kn->kn_fop = &pipe_rfiltops;
1439 break;
1440 case EVFILT_WRITE:
1441 kn->kn_fop = &pipe_wfiltops;
1442 cpipe = cpipe->pipe_peer;
1443 if (cpipe == NULL)
1444 /* other end of pipe has been closed */
1445 return (EBADF);
1446 break;
1447 default:
1448 return (1);
1449 }
1450 kn->kn_hook = cpipe;
1451
1452 PIPE_LOCK(cpipe);
1453 SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
1454 PIPE_UNLOCK(cpipe);
1455 return (0);
1456 }
1457
1458 static void
1459 filt_pipedetach(struct knote *kn)
1460 {
1461 struct pipe *cpipe = (struct pipe *)kn->kn_hook;
1462
1463 PIPE_LOCK(cpipe);
1464 SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
1465 PIPE_UNLOCK(cpipe);
1466 }
1467
1468 /*ARGSUSED*/
1469 static int
1470 filt_piperead(struct knote *kn, long hint)
1471 {
1472 struct pipe *rpipe = kn->kn_fp->f_data;
1473 struct pipe *wpipe = rpipe->pipe_peer;
1474
1475 PIPE_LOCK(rpipe);
1476 kn->kn_data = rpipe->pipe_buffer.cnt;
1477 if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
1478 kn->kn_data = rpipe->pipe_map.cnt;
1479
1480 if ((rpipe->pipe_state & PIPE_EOF) ||
1481 (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1482 kn->kn_flags |= EV_EOF;
1483 PIPE_UNLOCK(rpipe);
1484 return (1);
1485 }
1486 PIPE_UNLOCK(rpipe);
1487 return (kn->kn_data > 0);
1488 }
1489
1490 /*ARGSUSED*/
1491 static int
1492 filt_pipewrite(struct knote *kn, long hint)
1493 {
1494 struct pipe *rpipe = kn->kn_fp->f_data;
1495 struct pipe *wpipe = rpipe->pipe_peer;
1496
1497 PIPE_LOCK(rpipe);
1498 if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1499 kn->kn_data = 0;
1500 kn->kn_flags |= EV_EOF;
1501 PIPE_UNLOCK(rpipe);
1502 return (1);
1503 }
1504 kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1505 if (wpipe->pipe_state & PIPE_DIRECTW)
1506 kn->kn_data = 0;
1507
1508 PIPE_UNLOCK(rpipe);
1509 return (kn->kn_data >= PIPE_BUF);
1510 }
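
/*
 * Userland sketch of the filters above (illustrative, not kernel
 * code): register EVFILT_READ on the read side; kn_data surfaces as
 * kevent.data (bytes available) and EV_EOF is set once the writer
 * closes.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>

static int
wait_pipe_read(int rfd)
{
	struct kevent kev;
	int kq;

	if ((kq = kqueue()) == -1)
		return (-1);
	EV_SET(&kev, rfd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1 ||
	    kevent(kq, NULL, 0, &kev, 1, NULL) != 1) {
		(void)close(kq);
		return (-1);
	}
	(void)close(kq);
	return ((kev.flags & EV_EOF) ? 0 : (int)kev.data);
}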