/*	$OpenBSD: sys_pipe.c,v 1.25 2000/11/16 20:02:17 provos Exp $	*/

/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 */

#ifndef OLD_PIPE

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */
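
/*
 * For orientation, a minimal userland sketch of the interface this file
 * implements (illustrative only; pipe(2), read(2) and write(2) are the
 * real entry points, err(3) is used for brevity):
 *
 *	int fds[2];
 *	char buf[5];
 *
 *	if (pipe(fds) == -1)
 *		err(1, "pipe");
 *	write(fds[1], "hello", 5);	(fds[1] is the write end)
 *	read(fds[0], buf, 5);		(fds[0] is the read end)
 */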

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/protosw.h>
#include <sys/stat.h>
#include <sys/filedesc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/errno.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/event.h>

#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <sys/pipe.h>

/*
 * interfaces to the outside world
 */
int	pipe_read __P((struct file *, off_t *, struct uio *, struct ucred *));
int	pipe_write __P((struct file *, off_t *, struct uio *, struct ucred *));
int	pipe_close __P((struct file *, struct proc *));
int	pipe_select __P((struct file *, int which, struct proc *));
int	pipe_ioctl __P((struct file *, u_long, caddr_t, struct proc *));

static struct fileops pipeops =
    { pipe_read, pipe_write, pipe_ioctl, pipe_select, pipe_close };

int	filt_pipeattach(struct knote *kn);
void	filt_pipedetach(struct knote *kn);
int	filt_piperead(struct knote *kn, long hint);
int	filt_pipewrite(struct knote *kn, long hint);

struct filterops pipe_rwfiltops[] = {
	{ 1, filt_pipeattach, filt_pipedetach, filt_piperead },
	{ 1, filt_pipeattach, filt_pipedetach, filt_pipewrite },
};

/*
 * Default pipe buffer size(s).  This can be fairly large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	32
int nbigpipe;

static int amountpipekva;

void	pipeclose __P((struct pipe *));
void	pipeinit __P((struct pipe *));
int	pipe_stat __P((struct pipe *, struct stat *));
static __inline int pipelock __P((struct pipe *, int));
static __inline void pipeunlock __P((struct pipe *));
static __inline void pipeselwakeup __P((struct pipe *));
void	pipespace __P((struct pipe *));

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 */

/* ARGSUSED */
int
sys_opipe(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct filedesc *fdp = p->p_fd;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd, error;

	rpipe = malloc(sizeof(*rpipe), M_PIPE, M_WAITOK);
	pipeinit(rpipe);
	wpipe = malloc(sizeof(*wpipe), M_PIPE, M_WAITOK);
	pipeinit(wpipe);

	error = falloc(p, &rf, &fd);
	if (error)
		goto free2;
	rf->f_flag = FREAD | FWRITE;
	rf->f_type = DTYPE_PIPE;
	rf->f_ops = &pipeops;
	rf->f_data = (caddr_t)rpipe;
	retval[0] = fd;

	error = falloc(p, &wf, &fd);
	if (error)
		goto free3;
	wf->f_flag = FREAD | FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_ops = &pipeops;
	wf->f_data = (caddr_t)wpipe;
	retval[1] = fd;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;

	return (0);
free3:
	ffree(rf);
	fdremove(fdp, retval[0]);
free2:
	pipeclose(wpipe);
	pipeclose(rpipe);
	return (error);
}

/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 */
void
pipespace(cpipe)
	struct pipe *cpipe;
{
#if defined(UVM)
	/* XXX - this is wrong, use an aobj instead */
	cpipe->pipe_buffer.buffer = (caddr_t) uvm_km_valloc(kernel_map,
						cpipe->pipe_buffer.size);
	if (cpipe->pipe_buffer.buffer == NULL)
		panic("pipespace: out of kvm");
#else
	int npages, error;

	npages = round_page(cpipe->pipe_buffer.size)/PAGE_SIZE;
	/*
	 * Create an object; I don't like the idea of paging to/from
	 * kernel_object.
	 */
	cpipe->pipe_buffer.object = vm_object_allocate(npages);
	cpipe->pipe_buffer.buffer = (caddr_t) vm_map_min(kernel_map);

	/*
	 * Insert the object into the kernel map, and allocate kva for it.
	 * The map entry is, by default, pageable.
	 */
	error = vm_map_find(kernel_map, cpipe->pipe_buffer.object, 0,
		(vaddr_t *) &cpipe->pipe_buffer.buffer,
		cpipe->pipe_buffer.size, 1);
	if (error != KERN_SUCCESS)
		panic("pipespace: out of kvm");
#endif
	amountpipekva += cpipe->pipe_buffer.size;
}

/*
 * initialize and allocate VM and memory for pipe
 */
void
pipeinit(cpipe)
	struct pipe *cpipe;
{
	int s;

	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;
	cpipe->pipe_buffer.size = PIPE_SIZE;

	/* Buffer kva gets dynamically allocated */
	cpipe->pipe_buffer.buffer = NULL;
	/* cpipe->pipe_buffer.object = invalid */

	cpipe->pipe_state = 0;
	cpipe->pipe_peer = NULL;
	cpipe->pipe_busy = 0;
	s = splhigh();
	cpipe->pipe_ctime = time;
	cpipe->pipe_atime = time;
	cpipe->pipe_mtime = time;
	splx(s);
	bzero(&cpipe->pipe_sel, sizeof cpipe->pipe_sel);
	cpipe->pipe_pgid = NO_PID;
}

/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(cpipe, catch)
	struct pipe *cpipe;
	int catch;
{
	int error;

	while (cpipe->pipe_state & PIPE_LOCK) {
		cpipe->pipe_state |= PIPE_LWANT;
		error = tsleep(cpipe, catch ? PRIBIO|PCATCH : PRIBIO,
		    "pipelk", 0);
		if (error)
			return (error);
	}
	cpipe->pipe_state |= PIPE_LOCK;
	return (0);
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(cpipe)
	struct pipe *cpipe;
{
	cpipe->pipe_state &= ~PIPE_LOCK;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
}
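
/*
 * Canonical usage of the pair above, as seen throughout this file: the
 * I/O lock is held only around uiomove(), never across a sleep waiting
 * for data or space (a sketch of the existing pattern, not new code):
 *
 *	if ((error = pipelock(cpipe, 1)) == 0) {
 *		error = uiomove(..., size, uio);
 *		pipeunlock(cpipe);
 *	}
 */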

static __inline void
pipeselwakeup(cpipe)
	struct pipe *cpipe;
{
	if (cpipe->pipe_state & PIPE_SEL) {
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_pgid != NO_PID)
		gsignal(cpipe->pipe_pgid, SIGIO);
	KNOTE(&cpipe->pipe_sel.si_note, 0);
}

/* ARGSUSED */
int
pipe_read(fp, poff, uio, cred)
	struct file *fp;
	off_t *poff;
	struct uio *uio;
	struct ucred *cred;
{
	struct pipe *rpipe = (struct pipe *) fp->f_data;
	int error = 0;
	int nread = 0;
	int size;

	++rpipe->pipe_busy;
	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > uio->uio_resid)
				size = uio->uio_resid;
			if ((error = pipelock(rpipe, 1)) == 0) {
				error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
					size, uio);
				pipeunlock(rpipe);
			}
			if (error)
				break;
			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;
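			/*
			 * The buffer is circular.  "size" was clamped above
			 * to the segment between "out" and the end of the
			 * buffer, so "out" can land exactly on "size" but
			 * never past it.  E.g. with size 16384, out 16000,
			 * cnt 500: at most 384 bytes are copied, out wraps
			 * to 0, and the remaining 116 bytes are picked up
			 * on the next loop iteration.
			 */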

			rpipe->pipe_buffer.cnt -= size;
			nread += size;
		} else {
			/*
			 * detect EOF condition
			 */
			if (rpipe->pipe_state & PIPE_EOF) {
				/* XXX error = ? */
				break;
			}
			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}
			if (nread > 0)
				break;

			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if ((error = pipelock(rpipe, 1)) == 0) {
				if (rpipe->pipe_buffer.cnt == 0) {
					rpipe->pipe_buffer.in = 0;
					rpipe->pipe_buffer.out = 0;
				}
				pipeunlock(rpipe);
			} else {
				break;
			}

			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			rpipe->pipe_state |= PIPE_WANTR;
			error = tsleep(rpipe, PRIBIO|PCATCH, "piperd", 0);
			if (error)
				break;
		}
	}

	if (error == 0) {
		int s = splhigh();
		rpipe->pipe_atime = time;
		splx(s);
	}

	--rpipe->pipe_busy;
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
		rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * If there is no more to read in the pipe, reset
		 * its pointers to the beginning.  This improves
		 * cache hit stats.
		 */
		if (rpipe->pipe_buffer.cnt == 0) {
			if ((error == 0) && (error = pipelock(rpipe, 1)) == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
				pipeunlock(rpipe);
			}
		}

		/*
		 * If the "write-side" has been blocked, wake it up now.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
		pipeselwakeup(rpipe);

	return (error);
}

int
pipe_write(fp, poff, uio, cred)
	struct file *fp;
	off_t *poff;
	struct uio *uio;
	struct ucred *cred;
{
	int error = 0;
	int orig_resid;
	struct pipe *wpipe, *rpipe;

	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * Detect loss of the pipe read side; the EPIPE return causes
	 * the caller to post SIGPIPE.
	 */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF))
		return (EPIPE);

	/*
	 * If it is advantageous to resize the pipe buffer, do so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (nbigpipe < LIMITBIGPIPES) &&
	    (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (wpipe->pipe_buffer.cnt == 0)) {

		if (wpipe->pipe_buffer.buffer) {
			amountpipekva -= wpipe->pipe_buffer.size;
#if defined(UVM)
			uvm_km_free(kernel_map,
				(vaddr_t)wpipe->pipe_buffer.buffer,
				wpipe->pipe_buffer.size);
#else
			kmem_free(kernel_map,
				(vaddr_t)wpipe->pipe_buffer.buffer,
				wpipe->pipe_buffer.size);
#endif
		}

		wpipe->pipe_buffer.in = 0;
		wpipe->pipe_buffer.out = 0;
		wpipe->pipe_buffer.cnt = 0;
		wpipe->pipe_buffer.size = BIG_PIPE_SIZE;
		wpipe->pipe_buffer.buffer = NULL;
		++nbigpipe;
	}

	if (wpipe->pipe_buffer.buffer == NULL) {
		if ((error = pipelock(wpipe, 1)) == 0) {
			pipespace(wpipe);
			pipeunlock(wpipe);
		} else {
			return (error);
		}
	}

	++wpipe->pipe_busy;
	orig_resid = uio->uio_resid;
	while (uio->uio_resid) {
		int space;

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		/* XXX perhaps they need to be contiguous to be atomic? */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;
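		/*
		 * Illustrative consequence of the test above: if two
		 * processes each do write(fd, rec, 100) on the same pipe,
		 * the two 100-byte records are never interleaved, because
		 * 100 <= PIPE_BUF and a writer whose whole (small) request
		 * does not fit sleeps rather than copying a partial record.
		 * Requests larger than PIPE_BUF may be split.
		 */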

		if (space > 0 &&
		    (wpipe->pipe_buffer.cnt < wpipe->pipe_buffer.size)) {
			/*
			 * This sets the maximum transfer to one contiguous
			 * segment of the buffer.
			 */
			int size = wpipe->pipe_buffer.size - wpipe->pipe_buffer.in;
			/*
			 * space is the size left in the buffer
			 */
			if (size > space)
				size = space;
			/*
			 * now limit it to the size of the uio transfer
			 */
			if (size > uio->uio_resid)
				size = uio->uio_resid;
			if ((error = pipelock(wpipe, 1)) == 0) {
				error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
					size, uio);
				pipeunlock(wpipe);
			}
			if (error)
				break;

			wpipe->pipe_buffer.in += size;
			if (wpipe->pipe_buffer.in >= wpipe->pipe_buffer.size)
				wpipe->pipe_buffer.in = 0;

			wpipe->pipe_buffer.cnt += size;
		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * so wake up selects.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			error = tsleep(wpipe, (PRIBIO + 1)|PCATCH,
			    "pipewr", 0);
			if (error)
				break;
			/*
			 * If the read side wants to go away, return EPIPE;
			 * the caller turns that into a SIGPIPE for us.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	--wpipe->pipe_busy;
	if ((wpipe->pipe_busy == 0) &&
	    (wpipe->pipe_state & PIPE_WANT)) {
		wpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.cnt == 0) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE))
		error = 0;

	if (error == 0) {
		int s = splhigh();
		wpipe->pipe_mtime = time;
		splx(s);
	}

	/*
	 * We have something to offer, so wake up select.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe);

	return (error);
}

/*
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(fp, cmd, data, p)
	struct file *fp;
	u_long cmd;
	caddr_t data;
	struct proc *p;
{
	struct pipe *mpipe = (struct pipe *)fp->f_data;

	switch (cmd) {

	case FIONBIO:
		return (0);

	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		return (0);

	case FIONREAD:
		*(int *)data = mpipe->pipe_buffer.cnt;
		return (0);

	case SIOCSPGRP:
		mpipe->pipe_pgid = *(int *)data;
		return (0);

	case SIOCGPGRP:
		*(int *)data = mpipe->pipe_pgid;
		return (0);
	}
	return (ENOTTY);
}
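
/*
 * Userland sketch of the ioctls above (illustrative only): FIONREAD
 * reports how many bytes a read would return without blocking.
 *
 *	#include <sys/ioctl.h>
 *
 *	int nbytes;
 *
 *	if (ioctl(fds[0], FIONREAD, &nbytes) == 0 && nbytes > 0)
 *		read(fds[0], buf, nbytes);	(will not block)
 */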

int
pipe_select(fp, which, p)
	struct file *fp;
	int which;
	struct proc *p;
{
	struct pipe *rpipe = (struct pipe *)fp->f_data;
	struct pipe *wpipe;

	wpipe = rpipe->pipe_peer;
	switch (which) {

	case FREAD:
		if ((rpipe->pipe_buffer.cnt > 0) ||
		    (rpipe->pipe_state & PIPE_EOF)) {
			return (1);
		}
		selrecord(p, &rpipe->pipe_sel);
		rpipe->pipe_state |= PIPE_SEL;
		break;

	case FWRITE:
		if ((wpipe == NULL) ||
		    (wpipe->pipe_state & PIPE_EOF) ||
		    ((wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF)) {
			return (1);
		}
		selrecord(p, &wpipe->pipe_sel);
		wpipe->pipe_state |= PIPE_SEL;
		break;

	case 0:
		/* exceptional conditions: ready on EOF of either side */
		if ((rpipe->pipe_state & PIPE_EOF) ||
		    (wpipe == NULL) ||
		    (wpipe->pipe_state & PIPE_EOF)) {
			return (1);
		}
		selrecord(p, &rpipe->pipe_sel);
		rpipe->pipe_state |= PIPE_SEL;
		break;
	}
	return (0);
}
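
/*
 * Matching userland sketch (illustrative only): select(2) reports the
 * read end ready when data or EOF is pending, and the write end ready
 * when at least PIPE_BUF bytes of space are free, mirroring the tests
 * above.
 *
 *	#include <sys/types.h>
 *	#include <sys/time.h>
 *	#include <unistd.h>
 *
 *	fd_set rfds;
 *
 *	FD_ZERO(&rfds);
 *	FD_SET(fds[0], &rfds);
 *	if (select(fds[0] + 1, &rfds, NULL, NULL, NULL) > 0 &&
 *	    FD_ISSET(fds[0], &rfds))
 *		read(fds[0], buf, sizeof(buf));	(will not block)
 */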

int
pipe_stat(pipe, ub)
	struct pipe *pipe;
	struct stat *ub;
{
	bzero((caddr_t)ub, sizeof (*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_atime, &ub->st_atimespec);
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_mtime, &ub->st_mtimespec);
	TIMEVAL_TO_TIMESPEC(&pipe->pipe_ctime, &ub->st_ctimespec);
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return (0);
}

/* ARGSUSED */
int
pipe_close(fp, p)
	struct file *fp;
	struct proc *p;
{
	struct pipe *cpipe = (struct pipe *)fp->f_data;

	pipeclose(cpipe);
	fp->f_data = NULL;
	return (0);
}

/*
 * shut down the pipe
 */
void
pipeclose(cpipe)
	struct pipe *cpipe;
{
	struct pipe *ppipe;

	if (cpipe) {
		pipeselwakeup(cpipe);

		/*
		 * If the other side is blocked, wake it up saying that
		 * we want to close it down.
		 */
		while (cpipe->pipe_busy) {
			wakeup(cpipe);
			cpipe->pipe_state |= PIPE_WANT|PIPE_EOF;
			tsleep(cpipe, PRIBIO, "pipecl", 0);
		}

		/*
		 * Disconnect from peer
		 */
		if ((ppipe = cpipe->pipe_peer) != NULL) {
			pipeselwakeup(ppipe);

			ppipe->pipe_state |= PIPE_EOF;
			wakeup(ppipe);
			ppipe->pipe_peer = NULL;
		}

		/*
		 * free resources
		 */
		if (cpipe->pipe_buffer.buffer) {
			if (cpipe->pipe_buffer.size > PIPE_SIZE)
				--nbigpipe;
			amountpipekva -= cpipe->pipe_buffer.size;
#if defined(UVM)
			uvm_km_free(kernel_map,
				(vaddr_t)cpipe->pipe_buffer.buffer,
				cpipe->pipe_buffer.size);
#else
			kmem_free(kernel_map,
				(vaddr_t)cpipe->pipe_buffer.buffer,
				cpipe->pipe_buffer.size);
#endif
		}
		free(cpipe, M_PIPE);
	}
}

int
filt_pipeattach(struct knote *kn)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;

	SLIST_INSERT_HEAD(&rpipe->pipe_sel.si_note, kn, kn_selnext);
	return (0);
}

void
filt_pipedetach(struct knote *kn)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;

	SLIST_REMOVE(&rpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	kn->kn_data = rpipe->pipe_buffer.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

	return (kn->kn_data >= PIPE_BUF);
}
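
/*
 * Userland sketch of the filters above (illustrative only): EVFILT_READ
 * fires with kn_data bytes readable; EVFILT_WRITE fires once PIPE_BUF
 * bytes of buffer space are free.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *
 *	struct kevent kev;
 *	int kq;
 *
 *	kq = kqueue();
 *	EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	if (kevent(kq, NULL, 0, &kev, 1, NULL) > 0)
 *		read(fds[0], buf, kev.data);	(kev.data bytes ready)
 */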
#endif /* !OLD_PIPE */